author     partain <unknown>    1996-07-25 21:33:42 +0000
committer  partain <unknown>    1996-07-25 21:33:42 +0000
commit     5eb1c77c795f92ed0f4c8023847e9d4be1a4fd0d (patch)
tree       adb07110e00f00b2b2ef6365e16d5f58b260ce3c /ghc
parent     f7ecf7234c224489be8a5e63fced903b655d92ee (diff)
download   haskell-5eb1c77c795f92ed0f4c8023847e9d4be1a4fd0d.tar.gz
[project @ 1996-07-25 20:43:49 by partain]
Bulk of final changes for 2.01
Diffstat (limited to 'ghc')
-rw-r--r--  ghc/CONTRIB/README | 4
-rw-r--r--  ghc/CONTRIB/haskell-modes/README | 6
-rw-r--r--  ghc/README | 64
-rw-r--r--  ghc/compiler/Jmakefile | 4
-rw-r--r--  ghc/compiler/absCSyn/Costs.lhs | 1
-rw-r--r--  ghc/compiler/basicTypes/Id.lhs | 27
-rw-r--r--  ghc/compiler/basicTypes/UniqSupply.lhs | 3
-rw-r--r--  ghc/compiler/coreSyn/CoreLint.lhs | 10
-rw-r--r--  ghc/compiler/coreSyn/CoreUnfold.lhs | 26
-rw-r--r--  ghc/compiler/coreSyn/CoreUtils.lhs | 9
-rw-r--r--  ghc/compiler/coreSyn/NmbrCore.lhs | 163
-rw-r--r--  ghc/compiler/deSugar/DsExpr.lhs | 2
-rw-r--r--  ghc/compiler/hsSyn/HsMatches.lhs | 3
-rw-r--r--  ghc/compiler/nativeGen/StixInteger.lhs | 2
-rw-r--r--  ghc/compiler/prelude/PrelVals.lhs | 3
-rw-r--r--  ghc/compiler/prelude/PrimOp.lhs | 2
-rw-r--r--  ghc/compiler/rename/Rename.lhs | 2
-rw-r--r--  ghc/compiler/rename/RnNames.lhs | 5
-rw-r--r--  ghc/compiler/simplCore/BinderInfo.lhs | 90
-rw-r--r--  ghc/compiler/simplCore/FoldrBuildWW.lhs | 4
-rw-r--r--  ghc/compiler/simplCore/MagicUFs.lhs | 11
-rw-r--r--  ghc/compiler/simplCore/SimplCase.lhs | 6
-rw-r--r--  ghc/compiler/simplCore/SimplEnv.lhs | 75
-rw-r--r--  ghc/compiler/simplCore/SimplVar.lhs | 1
-rw-r--r--  ghc/compiler/simplCore/Simplify.lhs | 146
-rw-r--r--  ghc/compiler/specialise/Specialise.lhs | 3
-rw-r--r--  ghc/compiler/stranal/WorkWrap.lhs | 19
-rw-r--r--  ghc/compiler/typecheck/TcExpr.lhs | 3
-rw-r--r--  ghc/compiler/typecheck/TcInstDcls.lhs | 15
-rw-r--r--  ghc/compiler/typecheck/TcInstUtil.lhs | 5
-rw-r--r--  ghc/compiler/typecheck/TcModule.lhs | 12
-rw-r--r--  ghc/compiler/types/Class.lhs | 18
-rw-r--r--  ghc/compiler/types/PprType.lhs | 1
-rw-r--r--  ghc/compiler/types/Type.lhs | 5
-rw-r--r--  ghc/compiler/utils/Bag.lhs | 16
-rw-r--r--  ghc/compiler/utils/FiniteMap.lhs | 4
-rw-r--r--  ghc/compiler/utils/ListSetOps.lhs | 4
-rw-r--r--  ghc/compiler/utils/Maybes.lhs | 9
-rw-r--r--  ghc/compiler/utils/Pretty.lhs | 6
-rw-r--r--  ghc/compiler/utils/Util.lhs | 29
-rw-r--r--  ghc/docs/ANNOUNCE-0.06 | 116
-rw-r--r--  ghc/docs/ANNOUNCE-0.10 | 135
-rw-r--r--  ghc/docs/ANNOUNCE-0.16 | 146
-rw-r--r--  ghc/docs/ANNOUNCE-0.19 | 130
-rw-r--r--  ghc/docs/ANNOUNCE-0.20 | 55
-rw-r--r--  ghc/docs/ANNOUNCE-0.22 | 109
-rw-r--r--  ghc/docs/ANNOUNCE-0.23 | 124
-rw-r--r--  ghc/docs/ANNOUNCE-0.25 | 54
-rw-r--r--  ghc/docs/ANNOUNCE-0.26 | 153
-rw-r--r--  ghc/docs/ANNOUNCE-0.27 | 72
-rw-r--r--  ghc/docs/Jmakefile | 10
-rw-r--r--  ghc/docs/NOTES.adding-PrimOp | 51
-rw-r--r--  ghc/docs/NOTES.arbitary-ints | 54
-rw-r--r--  ghc/docs/NOTES.c-optimisation | 2361
-rw-r--r--  ghc/docs/NOTES.core-overview | 94
-rw-r--r--  ghc/docs/NOTES.desugar | 323
-rw-r--r--  ghc/docs/NOTES.garbage.collection | 206
-rw-r--r--  ghc/docs/NOTES.import | 90
-rw-r--r--  ghc/docs/NOTES.interface | 54
-rw-r--r--  ghc/docs/NOTES.mkworld | 248
-rw-r--r--  ghc/docs/NOTES.part-of-book | 73
-rw-r--r--  ghc/docs/NOTES.rename | 109
-rw-r--r--  ghc/docs/NOTES.saving-space | 250
-rw-r--r--  ghc/docs/NOTES.update-mechanism | 195
-rw-r--r--  ghc/docs/Prefix_Form | 294
-rw-r--r--  ghc/docs/README | 48
-rw-r--r--  ghc/docs/abstracts/README | 4
-rw-r--r--  ghc/docs/abstracts/abstracts.sty | 30
-rw-r--r--  ghc/docs/abstracts/abstracts89.tex | 487
-rw-r--r--  ghc/docs/abstracts/abstracts90.tex | 153
-rw-r--r--  ghc/docs/abstracts/abstracts91.tex | 232
-rw-r--r--  ghc/docs/abstracts/abstracts92.tex | 292
-rw-r--r--  ghc/docs/abstracts/abstracts93.tex | 326
-rw-r--r--  ghc/docs/abstracts/abstracts94.tex | 187
-rw-r--r--  ghc/docs/abstracts/before90.tex | 471
-rw-r--r--  ghc/docs/abstracts/reports.tex | 111
-rw-r--r--  ghc/docs/abstracts/slpj.sty | 41
-rw-r--r--  ghc/docs/abstracts/useful.sty | 186
-rw-r--r--  ghc/docs/add_to_compiler/Jmakefile | 22
-rw-r--r--  ghc/docs/add_to_compiler/back-end.verb | 41
-rw-r--r--  ghc/docs/add_to_compiler/core-summary-fig.verb | 45
-rw-r--r--  ghc/docs/add_to_compiler/core-syntax.verb | 139
-rw-r--r--  ghc/docs/add_to_compiler/front-end.verb | 297
-rw-r--r--  ghc/docs/add_to_compiler/howto-add.verb | 357
-rw-r--r--  ghc/docs/add_to_compiler/overview-fig.fig | 136
-rw-r--r--  ghc/docs/add_to_compiler/overview.verb | 70
-rw-r--r--  ghc/docs/add_to_compiler/paper.bbl | 72
-rw-r--r--  ghc/docs/add_to_compiler/paper.verb | 128
-rw-r--r--  ghc/docs/add_to_compiler/slides-root.tex | 8
-rw-r--r--  ghc/docs/add_to_compiler/slides.tex | 86
-rw-r--r--  ghc/docs/add_to_compiler/state-of-play.NOTES | 73
-rw-r--r--  ghc/docs/add_to_compiler/state-of-play.verb | 14
-rw-r--r--  ghc/docs/add_to_compiler/stg-summary-fig.verb | 55
-rw-r--r--  ghc/docs/install_guide/Jmakefile | 4
-rw-r--r--  ghc/docs/install_guide/installing.lit | 300
-rw-r--r--  ghc/docs/release_notes/0-02-notes.lit | 230
-rw-r--r--  ghc/docs/release_notes/0-03-README | 47
-rw-r--r--  ghc/docs/release_notes/0-04-README | 15
-rw-r--r--  ghc/docs/release_notes/0-05-notes.lit | 86
-rw-r--r--  ghc/docs/release_notes/0-06-notes.lit | 266
-rw-r--r--  ghc/docs/release_notes/0-07-README | 4
-rw-r--r--  ghc/docs/release_notes/0-07-notes.lit | 51
-rw-r--r--  ghc/docs/release_notes/0-08-notes.lit | 149
-rw-r--r--  ghc/docs/release_notes/0-10-notes.lit | 72
-rw-r--r--  ghc/docs/release_notes/0-16-notes.lit | 106
-rw-r--r--  ghc/docs/release_notes/0-17-notes.lit | 1
-rw-r--r--  ghc/docs/release_notes/0-18-README | 63
-rw-r--r--  ghc/docs/release_notes/0-19-notes.lit | 187
-rw-r--r--  ghc/docs/release_notes/0-22-notes.lit | 205
-rw-r--r--  ghc/docs/release_notes/0-23-notes.lit | 253
-rw-r--r--  ghc/docs/release_notes/0-26-notes.lit | 244
-rw-r--r--  ghc/docs/release_notes/2-01-notes.lit | 202
-rw-r--r--  ghc/docs/release_notes/Jmakefile | 3
-rw-r--r--  ghc/docs/release_notes/release.lit | 69
-rw-r--r--  ghc/docs/simple-monad.lhs | 2
-rw-r--r--  ghc/docs/state_interface/Jmakefile | 7
-rw-r--r--  ghc/docs/state_interface/state-interface.verb | 313
-rw-r--r--  ghc/docs/users_guide/Jmakefile | 3
-rw-r--r--  ghc/docs/users_guide/backwards.lit | 279
-rw-r--r--  ghc/docs/users_guide/glasgow_exts.lit | 318
-rw-r--r--  ghc/docs/users_guide/gone_wrong.lit | 71
-rw-r--r--  ghc/docs/users_guide/how_to_run.lit | 263
-rw-r--r--  ghc/docs/users_guide/intro.lit | 54
-rw-r--r--  ghc/docs/users_guide/libraries.lit | 318
-rw-r--r--  ghc/docs/users_guide/parallel.lit | 180
-rw-r--r--  ghc/docs/users_guide/prof-compiler-options.lit | 10
-rw-r--r--  ghc/docs/users_guide/prof-rts-options.lit | 8
-rw-r--r--  ghc/docs/users_guide/profiling.lit | 8
-rw-r--r--  ghc/docs/users_guide/recomp.lit | 41
-rw-r--r--  ghc/docs/users_guide/runtime_control.lit | 52
-rw-r--r--  ghc/docs/users_guide/sooner.lit | 36
-rw-r--r--  ghc/docs/users_guide/user.lit | 8
-rw-r--r--  ghc/docs/users_guide/utils.lit | 11
-rw-r--r--  ghc/docs/users_guide/vs_haskell.lit | 522
-rw-r--r--  ghc/driver/Jmakefile | 1
-rw-r--r--  ghc/driver/ghc-asm.lprl | 97
-rw-r--r--  ghc/driver/ghc-iface.lprl | 49
-rw-r--r--  ghc/driver/ghc-recomp.lprl | 6
-rw-r--r--  ghc/driver/ghc.lprl | 162
-rw-r--r--  ghc/driver/test_mangler | 6
-rw-r--r--  ghc/includes/COptRegs.lh | 16
-rw-r--r--  ghc/includes/COptWraps.lh | 22
-rw-r--r--  ghc/includes/CostCentre.lh | 10
-rw-r--r--  ghc/includes/GranSim.lh | 22
-rw-r--r--  ghc/includes/LLC.h | 2
-rw-r--r--  ghc/includes/Parallel.lh | 5
-rw-r--r--  ghc/includes/RtsTypes.lh | 4
-rw-r--r--  ghc/includes/Threads.lh | 3
-rw-r--r--  ghc/includes/c-as-asm.lit | 509
-rw-r--r--  ghc/includes/error.h | 3
-rw-r--r--  ghc/includes/ieee-flpt.h | 22
-rw-r--r--  ghc/includes/libposix.h | 57
-rw-r--r--  ghc/includes/platform.h.in | 36
-rw-r--r--  ghc/includes/root.lit | 89
-rw-r--r--  ghc/includes/stgdefs.h | 2
-rw-r--r--  ghc/includes/timezone.h | 31
-rw-r--r--  ghc/lib/Jmakefile | 125
-rw-r--r--  ghc/lib/MODULES | 26
-rw-r--r--  ghc/lib/cbits/Jmakefile | 68
-rw-r--r--  ghc/lib/cbits/getLock.lc | 138
-rw-r--r--  ghc/lib/cbits/inputReady.lc | 87
-rw-r--r--  ghc/lib/cbits/openFile.lc | 209
-rw-r--r--  ghc/lib/cbits/readFile.lc | 102
-rw-r--r--  ghc/lib/cbits/removeDirectory.lc | 57
-rw-r--r--  ghc/lib/cbits/removeFile.lc | 48
-rw-r--r--  ghc/lib/cbits/renameDirectory.lc | 48
-rw-r--r--  ghc/lib/cbits/renameFile.lc | 132
-rw-r--r--  ghc/lib/cbits/seekFile.lc | 135
-rw-r--r--  ghc/lib/cbits/setBuffering.lc | 123
-rw-r--r--  ghc/lib/cbits/setCurrentDirectory.lc | 25
-rw-r--r--  ghc/lib/cbits/stgio.h | 35
-rw-r--r--  ghc/lib/cbits/system.lc | 65
-rw-r--r--  ghc/lib/cbits/writeFile.lc | 38
-rw-r--r--  ghc/lib/concurrent/Merge.hs | 11
-rw-r--r--  ghc/lib/concurrent/Parallel.hs | 1
-rw-r--r--  ghc/lib/prelude/GHCbase.hs | 85
-rw-r--r--  ghc/lib/prelude/GHCerr.hs | 66
-rw-r--r--  ghc/lib/prelude/Main.mc_hi | 5
-rw-r--r--  ghc/lib/prelude/Main.mg_hi | 5
-rw-r--r--  ghc/lib/prelude/Main.mp_hi | 5
-rw-r--r--  ghc/lib/prelude/Main.p_hi | 5
-rw-r--r--  ghc/lib/prelude/Prelude.hs | 64
-rw-r--r--  ghc/lib/prelude/PreludeGlaST.hs | 9
-rw-r--r--  ghc/lib/required/List.hs | 4
-rw-r--r--  ghc/misc/spat-analysers/README | 22
-rw-r--r--  ghc/misc/spat-analysers/REGSTATS | 18
-rw-r--r--  ghc/misc/spat-analysers/StgRegAddrs.h | 19
-rw-r--r--  ghc/misc/spat-analysers/icount.c | 91
-rw-r--r--  ghc/misc/spat-analysers/icount_by_activity.c | 396
-rw-r--r--  ghc/misc/spat-analysers/makefile | 19
-rw-r--r--  ghc/misc/spat-analysers/show_icounts | 354
-rw-r--r--  ghc/misc/spat-analysers/spatmain.c | 243
-rw-r--r--  ghc/misc/spat-analysers/stgregs.c | 121
-rw-r--r--  ghc/mkworld/GHC_OPTS | 90
-rw-r--r--  ghc/mkworld/macros-ghc.jm | 2
-rw-r--r--  ghc/mkworld/root.lit | 10
-rw-r--r--  ghc/runtime/Jmakefile | 185
-rw-r--r--  ghc/runtime/c-as-asm/FreeMallocPtr.lc | 22
-rw-r--r--  ghc/runtime/c-as-asm/HpOverflow.lc | 85
-rw-r--r--  ghc/runtime/gum/LLComms.lc | 4
-rw-r--r--  ghc/runtime/gum/Pack.lc | 2
-rw-r--r--  ghc/runtime/gum/ParInit.lc | 2
-rw-r--r--  ghc/runtime/gum/RBH.lc | 28
-rw-r--r--  ghc/runtime/gum/SysMan.lc | 2
-rw-r--r--  ghc/runtime/hooks/ErrorHdr.lc | 3
-rw-r--r--  ghc/runtime/hooks/FreeForeignObj.lc | 18
-rw-r--r--  ghc/runtime/hooks/NoRunnableThrds.lc | 2
-rw-r--r--  ghc/runtime/hooks/OutOfHeap.lc | 5
-rw-r--r--  ghc/runtime/hooks/OutOfStk.lc | 3
-rw-r--r--  ghc/runtime/hooks/OutOfVM.lc | 4
-rw-r--r--  ghc/runtime/hooks/PatErrorHdr.lc | 3
-rw-r--r--  ghc/runtime/hooks/TraceHooks.lc | 6
-rw-r--r--  ghc/runtime/main/GranSim.lc | 57
-rw-r--r--  ghc/runtime/main/RtsFlags.lc | 23
-rw-r--r--  ghc/runtime/main/Signals.lc | 4
-rw-r--r--  ghc/runtime/main/StgUpdate.lhc | 8
-rw-r--r--  ghc/runtime/main/Threads.lc | 79
-rw-r--r--  ghc/runtime/main/Ticky.lc | 6
-rw-r--r--  ghc/runtime/storage/SM1s.lc | 4
-rw-r--r--  ghc/runtime/storage/SM2s.lc | 4
-rw-r--r--  ghc/runtime/storage/SMap.lc | 45
-rw-r--r--  ghc/runtime/storage/SMcopying.lc | 4
-rw-r--r--  ghc/runtime/storage/SMmark.lhc | 2
-rw-r--r--  ghc/runtime/storage/SMmarking.lc | 5
-rw-r--r--  ghc/utils/Jmakefile | 5
-rw-r--r--  ghc/utils/hp2ps/TraceElement.h | 2
-rw-r--r--  ghc/utils/hstags/README | 4
-rw-r--r--  ghc/utils/mkdependHS/mkdependHS.prl | 120
-rw-r--r--  ghc/utils/parallel/AVG.pl | 108
-rw-r--r--  ghc/utils/parallel/GrAnSim.el | 432
-rw-r--r--  ghc/utils/parallel/Jmakefile | 62
-rw-r--r--  ghc/utils/parallel/RTS2gran.pl | 684
-rw-r--r--  ghc/utils/parallel/SN.pl | 280
-rw-r--r--  ghc/utils/parallel/SPLIT.pl | 379
-rw-r--r--  ghc/utils/parallel/aux.pl | 89
-rw-r--r--  ghc/utils/parallel/avg-RTS.pl | 15
-rw-r--r--  ghc/utils/parallel/get_SN.pl | 40
-rw-r--r--  ghc/utils/parallel/gp-ext-imp.pl | 86
-rw-r--r--  ghc/utils/parallel/gr2RTS.pl | 138
-rw-r--r--  ghc/utils/parallel/gr2ap.bash | 124
-rw-r--r--  ghc/utils/parallel/gr2gran.bash | 113
-rw-r--r--  ghc/utils/parallel/gr2java.pl | 322
-rw-r--r--  ghc/utils/parallel/gr2jv.bash | 123
-rw-r--r--  ghc/utils/parallel/gr2pe.pl | 1434
-rw-r--r--  ghc/utils/parallel/gr2ps.bash | 135
-rw-r--r--  ghc/utils/parallel/gr2qp.pl | 290
-rw-r--r--  ghc/utils/parallel/gran-extr.pl | 2114
-rw-r--r--  ghc/utils/parallel/grs2gr.pl | 9
-rw-r--r--  ghc/utils/parallel/ps-scale-y.pl | 188
-rw-r--r--  ghc/utils/parallel/qp2ap.pl | 495
-rw-r--r--  ghc/utils/parallel/qp2ps.pl | 687
-rw-r--r--  ghc/utils/parallel/sn_filter.pl | 92
-rw-r--r--  ghc/utils/parallel/stats.pl | 168
-rw-r--r--  ghc/utils/parallel/template.pl | 141
-rw-r--r--  ghc/utils/parallel/tf.pl | 148
-rw-r--r--  ghc/utils/pvm/README | 3
256 files changed, 13015 insertions, 16186 deletions
diff --git a/ghc/CONTRIB/README b/ghc/CONTRIB/README
index df029e90e4..4adab28b20 100644
--- a/ghc/CONTRIB/README
+++ b/ghc/CONTRIB/README
@@ -5,10 +5,12 @@ fptags Denis Howe <dbh@doc.ic.ac.uk>
Bourne-shell script.
Create an emacs tags file for one or more functional programs.
-haskell.el A Haskell mode from Simon Marlow <simonm@dcs.glasgow.ac.uk>.
+haskell-modes/ A collection of all known "Haskell modes" for GNU Emacs.
haskel.gif Provided by Lennart Augustsson <augustss@cs.chalmers.se>
+haskell_poem Speaks for itself.
+
mira2hs Denis Howe <dbh@doc.ic.ac.uk>
Bourne-shell script.
Convert Miranda code to Haskell, more-or-less.
diff --git a/ghc/CONTRIB/haskell-modes/README b/ghc/CONTRIB/haskell-modes/README
index 248460d211..c931787996 100644
--- a/ghc/CONTRIB/haskell-modes/README
+++ b/ghc/CONTRIB/haskell-modes/README
@@ -7,7 +7,7 @@ include advertising or testimonials from happy users if they send them
along...
Will Partain
-partain@dcs.glasgow.ac.uk
+partain@dcs.gla.ac.uk
95/12/05
=======================================================================
@@ -20,7 +20,7 @@ partain@dcs.glasgow.ac.uk
chalmers/thiemann -- Peter Thiemann added "indentation stuff"
and fontification -- version 0.2.
- chalmers/sof -- Sigbjorn Finne's <sof@dcs.glasgow.ac.uk> hacked
+ chalmers/sof -- Sigbjorn Finne's <sof@dcs.gla.ac.uk> hacked
version of Thiemann's.
.......................................................................
@@ -52,4 +52,4 @@ partain@dcs.glasgow.ac.uk
yale/chak : "extended by Manuel M.T. Chakravarty with rudimentary
editing features (including better syntax table) and support
for the font-lock-mode." Via Hans Wolfgang Loidl
- <hwloidl@dcs.glasgow.ac.uk>
+ <hwloidl@dcs.gla.ac.uk>
diff --git a/ghc/README b/ghc/README
index ea726dfa14..8dfc8904a6 100644
--- a/ghc/README
+++ b/ghc/README
@@ -1,12 +1,16 @@
This is version 2.01 of the Glorious Glasgow Haskell compilation
-system (GHC). This is a major public release. The top-level file
-"ANNOUNCE-0.28" says more.
+system (GHC). GHC 2.01 is a compiler for Haskell 1.3.
-Haskell is "the" standard lazy functional programming language [see
-SIGPLAN Notices, May 1992]. Some general merits of GHC are given at
-the end of this file.
+2.01 is a full GHC release; however, as the first release of the 1.3
+compiler, it is "test" quality; it very well may have serious bugs.
+The top-level file "ANNOUNCE-2.01" says more.
-Documentation of interest:
+Haskell is "the" standard lazy functional programming language.
+Haskell 1.3 is the current version of the language, released in
+May. 1996. The language definition is on the Web at
+http://haskell.cs.yale.edu/haskell-report/haskell-report.html
+
+GHC documentation of interest:
* docs/install_guide/installing.{dvi,info,html}: How to configure,
build, and install the system.
@@ -29,50 +33,54 @@ do not suffer or grumble in silence. The "bug reports" section of the
User's Guide (docs/users_guide/user.{dvi,info,html}) says what we
would like to know when you report a problem.
-Current AQUA team (all @dcs.glasgow.ac.uk):
+Current AQUA team (all @dcs.gla.ac.uk):
Sigbjorn Finne (sof) [PhD student]
- Andy Gill (andy) [PhD student]
- Kevin Hammond (kh) [GRASP; now a research fellow]
+ Hans Wolfgang Loidl (hwloidl) [PhD student]
Simon Marlow (simonm) [PhD student]
- Darren Moffat (moffatd) [slave, summer '95]
Will Partain (partain) [hired hand, GRASP/AQUA]
Simon Peyton Jones (simonpj) [our Fearless Leader]
- Patrick Sansom (sansom) [hired hand, "Bidirectional Analyses"]
- Andr\'e Santos (andre) [PhD student]
+ Patrick Sansom (sansom) [hired hand, Bidirectional Analyses]
+ Phil Trinder (trinder) [hired hand, Parade]
+ David N Turner (dnt) [hired hand, Linear Types]
Past contributors and/or continuing advisors:
- Cordy Hall (cvh) [GRASP; now at Open University]
- John Launchbury (jl) [AQUA; now at OGI]
- Jim Mattson (mattson) [hired hand, AQUA; just moved to HP]
+
+ Andy Gill (andy) [PhD student; at HP]
+ Cordy Hall (cvh) [GRASP]
+ Kevin Hammond (kh) [GRASP; at St. Andrews]
+ John Launchbury (jl) [AQUA; at OGI]
+ Jim Mattson (mattson) [hired hand, AQUA; HP]
+ Darren Moffat (moffatd) [slave, summer '95; at MoD]
Bryan O'Sullivan (bos) [visiting slave, summer '94; at Sun]
- Alastair Reid (areid) [GHCI god, now working at Yale]
- Phil Wadler (wadler) [GRASP]
+ Alastair Reid (areid) [GHCI god, at Yale]
+ Andr\'e Santos (andre) [PhD student; back in Brazil]
+ Phil Wadler (wadler) [GRASP; at Lucent soon]
Cool people who've let us use their machines:
+
hppa1.1-hp-hpux Sam Nelson, Stirling University
- mips-sgi-irix5 Tim Niblett, Turing Institute, Glasgow
- sparc-sun-solaris2 Durham University
+ powerpc-ibm-aix Walter Robinson, Mechanical Eng'g, Glasgow U.
Simon's projects' acronyms:
GRIP ('87-'90): Graph reduction in parallel
GRASP ('90-'92): Graph reduction applications support project
AQUA ('93- ): Declarative systems architecture: a quantitative approach
-Dated: 95/07/24
+Dated: 96/07
-GHC WWW page: http://www.dcs.glasgow.ac.uk/fp/software/ghc.html
+GHC WWW page: http://www.dcs.gla.ac.uk/fp/software/ghc.html
E-mail contacts:
- glasgow-haskell-request@dcs.glasgow.ac.uk (general queries)
+ glasgow-haskell-request@dcs.gla.ac.uk (admin & general queries)
- glasgow-haskell-bugs@dcs.glasgow.ac.uk (bug reports mailing list)
- glasgow-haskell-users@dcs.glasgow.ac.uk (users' mailing list)
+ glasgow-haskell-bugs@dcs.gla.ac.uk (bug reports mailing list)
+ glasgow-haskell-users@dcs.gla.ac.uk (users' mailing list)
- glasgow-haskell-bugs-request@... to join, send mail *here*
- glasgow-haskell-users-request@... to join, send mail *here*
+ glasgow-haskell-bugs-request@... to join, send mail *here*
+ glasgow-haskell-users-request@... to join, send mail *here*
-Anonymous FTP site: ftp.dcs.glasgow.ac.uk:pub/haskell/glasgow. Mostly
-mirrored by ftp.cs.chalmers.se and nebula.cs.yale.edu (same
+Anonymous FTP site: ftp.dcs.gla.ac.uk:pub/haskell/glasgow. Mostly
+mirrored by ftp.cs.chalmers.se and haskell.cs.yale.edu (same
directory). Also: src.doc.ic.ac.uk, in
computing/programming/languages/haskell/glasgow/.
diff --git a/ghc/compiler/Jmakefile b/ghc/compiler/Jmakefile
index 7bc091c90d..925f261c8a 100644
--- a/ghc/compiler/Jmakefile
+++ b/ghc/compiler/Jmakefile
@@ -819,8 +819,8 @@ NormalLibraryTarget(hsp,$(HSP_OBJS_O))
/* BuildPgmFromCFiles(hsp,parser/printtree.o parser/main.o,,libhsp.a) */
#if DoInstallGHCSystem == YES
-MakeDirectories(install, $(INSTLIBDIR_GHC))
-InstallBinaryTarget(hsp,$(INSTLIBDIR_GHC))
+/* MakeDirectories(install, $(INSTLIBDIR_GHC)) */
+/* InstallBinaryTarget(hsp,$(INSTLIBDIR_GHC)) */
#endif /* DoInstall... */
YaccRunWithExpectMsg(parser/hsparser,12,0)
diff --git a/ghc/compiler/absCSyn/Costs.lhs b/ghc/compiler/absCSyn/Costs.lhs
index c32b010b68..5c03e36d6d 100644
--- a/ghc/compiler/absCSyn/Costs.lhs
+++ b/ghc/compiler/absCSyn/Costs.lhs
@@ -368,6 +368,7 @@ stmtMacroCosts macro modes =
GRAN_RESCHEDULE -> nullCosts {- GrAnSim bookkeeping -}
GRAN_FETCH_AND_RESCHEDULE -> nullCosts {- GrAnSim bookkeeping -}
THREAD_CONTEXT_SWITCH -> nullCosts {- GrAnSim bookkeeping -}
+ _ -> trace "Costs.stmtMacroCosts" nullCosts
-- ---------------------------------------------------------------------------
diff --git a/ghc/compiler/basicTypes/Id.lhs b/ghc/compiler/basicTypes/Id.lhs
index 70963624a9..79313ba581 100644
--- a/ghc/compiler/basicTypes/Id.lhs
+++ b/ghc/compiler/basicTypes/Id.lhs
@@ -40,6 +40,7 @@ module Id (
idType,
idUnique,
+ dataConRepType,
dataConArgTys,
dataConArity,
dataConNumFields,
@@ -107,6 +108,7 @@ module Id (
getIdUpdateInfo,
getPragmaInfo,
replaceIdInfo,
+ addInlinePragma,
-- IdEnvs AND IdSets
SYN_IE(IdEnv), SYN_IE(GenIdSet), SYN_IE(IdSet),
@@ -169,7 +171,7 @@ import MatchEnv ( MatchEnv )
import SrcLoc ( mkBuiltinSrcLoc )
import TyCon ( TyCon, mkTupleTyCon, tyConDataCons )
import Type ( mkSigmaTy, mkTyVarTys, mkFunTys, mkDictTy,
- applyTyCon, instantiateTy,
+ applyTyCon, instantiateTy, mkForAllTys,
tyVarsOfType, applyTypeEnvToTy, typePrimRep,
GenType, SYN_IE(ThetaType), SYN_IE(TauType), SYN_IE(Type)
)
@@ -816,6 +818,10 @@ idWantsToBeINLINEd :: Id -> Bool
idWantsToBeINLINEd (Id _ _ _ _ IWantToBeINLINEd _) = True
idWantsToBeINLINEd _ = False
+
+addInlinePragma :: Id -> Id
+addInlinePragma (Id u sn ty details _ info)
+ = Id u sn ty details IWantToBeINLINEd info
\end{code}
For @unlocaliseId@: See the brief commentary in
@@ -1392,6 +1398,25 @@ dataConSig (Id _ _ _ (TupleConId arity) _ _)
tyvars = take arity alphaTyVars
tyvar_tys = mkTyVarTys tyvars
+
+-- dataConRepType returns the type of the representation of a contructor
+-- This may differ from the type of the contructor Id itself for two reasons:
+-- a) the constructor Id may be overloaded, but the dictionary isn't stored
+-- b) the constructor may store an unboxed version of a strict field.
+-- Here's an example illustrating both:
+-- data Ord a => T a = MkT Int! a
+-- Here
+-- T :: Ord a => Int -> a -> T a
+-- but the rep type is
+-- Trep :: Int# -> a -> T a
+-- Actually, the unboxed part isn't implemented yet!
+
+dataConRepType :: GenId (GenType tv u) -> GenType tv u
+dataConRepType con
+ = mkForAllTys tyvars tau
+ where
+ (tyvars, theta, tau) = splitSigmaTy (idType con)
+
dataConFieldLabels :: DataCon -> [FieldLabel]
dataConFieldLabels (Id _ _ _ (DataConId _ _ fields _ _ _ _) _ _) = fields
dataConFieldLabels (Id _ _ _ (TupleConId _) _ _) = []
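[Editorial note, not part of the patch.] The new dataConRepType comment above is the heart of this change: a constructor's representation type drops the dictionary arguments that its source-level type mentions. The following standalone sketch uses toy types and invented names (not GHC's) to illustrate the splitSigmaTy / mkForAllTys idea the patch relies on.

\begin{code}
module RepTypeSketch where

-- A toy Type, just big enough to demonstrate the idea; none of these
-- constructors are GHC's.
data Type
  = ForAll String Type          -- forall a. ty
  | Fun Type Type               -- arg -> res
  | DictTy String Type          -- a class-constraint argument, e.g. Ord a
  | TyConApp String [Type]      -- T a, Int, ...
  | TyVar String
  deriving Show

-- Peel off the foralls, then any dictionary arguments, leaving the tau type.
splitSigma :: Type -> ([String], [Type], Type)
splitSigma (ForAll tv ty) =
  let (tvs, theta, tau) = splitSigma ty in (tv : tvs, theta, tau)
splitSigma (Fun d@(DictTy _ _) ty) =
  let (tvs, theta, tau) = splitSigma ty in (tvs, d : theta, tau)
splitSigma tau = ([], [], tau)

-- The representation type: re-quantify the tau type, discarding the
-- dictionaries (they are not stored in the constructed heap object).
repType :: Type -> Type
repType conTy = foldr ForAll tau tvs
  where (tvs, _theta, tau) = splitSigma conTy

-- The example from the comment in the patch: T :: Ord a => Int -> a -> T a
exampleConTy :: Type
exampleConTy =
  ForAll "a" (Fun (DictTy "Ord" (TyVar "a"))
                  (Fun (TyConApp "Int" [])
                       (Fun (TyVar "a") (TyConApp "T" [TyVar "a"]))))

main :: IO ()
main = print (repType exampleConTy)   -- forall a. Int -> a -> T a, no Ord a
\end{code}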
diff --git a/ghc/compiler/basicTypes/UniqSupply.lhs b/ghc/compiler/basicTypes/UniqSupply.lhs
index 88ac980af2..3cb2ca724e 100644
--- a/ghc/compiler/basicTypes/UniqSupply.lhs
+++ b/ghc/compiler/basicTypes/UniqSupply.lhs
@@ -103,7 +103,8 @@ mkSplitUniqSupply (C# c#)
returnPrimIO (I# (w2i (mask# `or#` u#)))
in
#if __GLASGOW_HASKELL__ >= 200
- primIOToIO mk_supply#
+ primIOToIO mk_supply# >>= \ s ->
+ return s
#else
mk_supply# `thenPrimIO` \ s ->
return s
diff --git a/ghc/compiler/coreSyn/CoreLint.lhs b/ghc/compiler/coreSyn/CoreLint.lhs
index f72c11e424..99afabcf14 100644
--- a/ghc/compiler/coreSyn/CoreLint.lhs
+++ b/ghc/compiler/coreSyn/CoreLint.lhs
@@ -18,7 +18,7 @@ import CoreSyn
import Bag
import Kind ( hasMoreBoxityInfo, Kind{-instance-} )
import Literal ( literalType, Literal{-instance-} )
-import Id ( idType, isBottomingId,
+import Id ( idType, isBottomingId, dataConRepType,
dataConArgTys, GenId{-instances-},
emptyIdSet, mkIdSet, intersectIdSets,
unionIdSets, elementOfIdSet, SYN_IE(IdSet)
@@ -198,14 +198,8 @@ lintCoreExpr (Let binds body)
(addInScopeVars binders (lintCoreExpr body))
lintCoreExpr e@(Con con args)
- = lintCoreArgs {-False-} e unoverloaded_ty args
+ = lintCoreArgs {-False-} e (dataConRepType con) args
-- Note: we don't check for primitive types in these arguments
- where
- -- Constructors are special in that they aren't passed their
- -- dictionary arguments, so we swizzle them out of the
- -- constructor type before handing over to lintCorArgs
- unoverloaded_ty = mkForAllTys tyvars tau
- (tyvars, theta, tau) = splitSigmaTy (idType con)
lintCoreExpr e@(Prim op args)
= lintCoreArgs {-True-} e (primOpType op) args
diff --git a/ghc/compiler/coreSyn/CoreUnfold.lhs b/ghc/compiler/coreSyn/CoreUnfold.lhs
index c45c4989aa..247e969fde 100644
--- a/ghc/compiler/coreSyn/CoreUnfold.lhs
+++ b/ghc/compiler/coreSyn/CoreUnfold.lhs
@@ -48,7 +48,7 @@ import IdInfo ( arityMaybe, bottomIsGuaranteed )
import Literal ( isNoRepLit, isLitLitLit )
import Pretty
import TyCon ( tyConFamilySize )
-import Type ( getAppDataTyConExpandingDicts )
+import Type ( maybeAppDataTyConExpandingDicts )
import UniqSet ( emptyUniqSet, unitUniqSet, mkUniqSet,
addOneToUniqSet, unionUniqSets
)
@@ -229,10 +229,16 @@ calcUnfoldingGuidance scc_s_OK bOMB_OUT_SIZE expr
(length val_binders)
(map discount_for val_binders)
size
- discount_for b | b `is_elem` cased_args = tyConFamilySize tycon
- | otherwise = 0
- where
- (tycon, _, _) = getAppDataTyConExpandingDicts (idType b)
+
+ discount_for b
+ | is_data && b `is_elem` cased_args = tyConFamilySize tycon
+ | otherwise = 0
+ where
+ (is_data, tycon)
+ = --trace "CoreUnfold.getAppDataTyConExpandingDicts:1" $
+ case (maybeAppDataTyConExpandingDicts (idType b)) of
+ Nothing -> (False, panic "discount")
+ Just (tc,_,_) -> (True, tc)
in
-- pprTrace "calcUnfold:" (ppAbove (ppr PprDebug uf) (ppr PprDebug expr))
uf
@@ -307,7 +313,7 @@ sizeExpr scc_s_OK bOMB_OUT_SIZE args expr
------------
size_up_alts scrut_ty (AlgAlts alts deflt)
= foldr (addSize . size_alg_alt) (size_up_deflt deflt) alts
- `addSizeN` (tyConFamilySize tycon)
+ `addSizeN` (if is_data then tyConFamilySize tycon else 1{-??-})
-- NB: we charge N for an alg. "case", where N is
-- the number of constructors in the thing being eval'd.
-- (You'll eventually get a "discount" of N if you
@@ -316,8 +322,11 @@ sizeExpr scc_s_OK bOMB_OUT_SIZE args expr
size_alg_alt (con,args,rhs) = size_up rhs
-- Don't charge for args, so that wrappers look cheap
- (tycon, _, _) = --trace "CoreUnfold.getAppDataTyConExpandingDicts" $
- getAppDataTyConExpandingDicts scrut_ty
+ (is_data,tycon)
+ = --trace "CoreUnfold.getAppDataTyConExpandingDicts:2" $
+ case (maybeAppDataTyConExpandingDicts scrut_ty) of
+ Nothing -> (False, panic "size_up_alts")
+ Just (tc,_,_) -> (True, tc)
size_up_alts _ (PrimAlts alts deflt)
= foldr (addSize . size_prim_alt) (size_up_deflt deflt) alts
@@ -345,7 +354,6 @@ sizeExpr scc_s_OK bOMB_OUT_SIZE args expr
sizeZero = Just (0, [])
sizeOne = Just (1, [])
sizeN n = Just (n, [])
- sizeVar v = Just (0, [v])
addSizeN Nothing _ = Nothing
addSizeN (Just (n, xs)) m
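[Editorial note, not part of the patch.] The two hunks above replace the partial getAppDataTyConExpandingDicts with its Maybe-returning sibling, so a non-algebraic scrutinee type no longer crashes the size calculation. A minimal sketch of that defensive pattern, with invented stand-ins rather than the compiler's own types:

\begin{code}
module DiscountSketch where

data TyCon = TyCon { tyConName :: String, tyConFamilySize :: Int }

-- Hypothetical stand-in for maybeAppDataTyConExpandingDicts: succeeds only
-- for algebraic data types.
maybeDataTyCon :: String -> Maybe TyCon
maybeDataTyCon "Bool"   = Just (TyCon "Bool" 2)
maybeDataTyCon "Colour" = Just (TyCon "Colour" 3)
maybeDataTyCon _        = Nothing          -- e.g. a function or primitive type

-- Discount an argument that is scrutinised by a case: the more constructors
-- its type has, the bigger the win from inlining and cancelling the case.
discountFor :: String -> Int
discountFor tyName =
  case maybeDataTyCon tyName of
    Just tc -> tyConFamilySize tc
    Nothing -> 0                           -- not a data type: no discount

main :: IO ()
main = mapM_ (print . discountFor) ["Bool", "Int -> Int"]
\end{code}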
diff --git a/ghc/compiler/coreSyn/CoreUtils.lhs b/ghc/compiler/coreSyn/CoreUtils.lhs
index 9bc5a17002..de0d323b4b 100644
--- a/ghc/compiler/coreSyn/CoreUtils.lhs
+++ b/ghc/compiler/coreSyn/CoreUtils.lhs
@@ -32,6 +32,7 @@ import CoreSyn
import CostCentre ( isDictCC, CostCentre, noCostCentre )
import Id ( idType, mkSysLocal, getIdArity, isBottomingId,
toplevelishId, mkIdWithNewUniq, applyTypeEnvToId,
+ dataConRepType,
addOneToIdEnv, growIdEnvList, lookupIdEnv,
isNullIdEnv, SYN_IE(IdEnv),
GenId{-instances-}
@@ -50,7 +51,7 @@ import TyVar ( cloneTyVar,
isNullTyVarEnv, addOneToTyVarEnv, SYN_IE(TyVarEnv)
)
import Type ( mkFunTy, mkForAllTy, mkForAllUsageTy, mkTyVarTy,
- getFunTy_maybe, applyTy, isPrimType,
+ getFunTyExpandingDicts_maybe, applyTy, isPrimType,
splitSigmaTy, splitFunTy, eqTy, applyTypeEnvToTy
)
import TysWiredIn ( trueDataCon, falseDataCon )
@@ -86,7 +87,7 @@ coreExprType (Coerce _ ty _) = ty -- that's the whole point!
-- a Con is a fully-saturated application of a data constructor
-- a Prim is <ditto> of a PrimOp
-coreExprType (Con con args) = applyTypeToArgs (idType con) args
+coreExprType (Con con args) = applyTypeToArgs (dataConRepType con) args
coreExprType (Prim op args) = applyTypeToArgs (primOpType op) args
coreExprType (Lam (ValBinder binder) expr)
@@ -109,7 +110,7 @@ coreExprType (App expr val_arg)
let
fun_ty = coreExprType expr
in
- case (getFunTy_maybe fun_ty) of
+ case (getFunTyExpandingDicts_maybe False{-no peeking-} fun_ty) of
Just (_, result_ty) -> result_ty
#ifdef DEBUG
Nothing -> pprPanic "coreExprType:\n"
@@ -136,7 +137,7 @@ applyTypeToArgs op_ty args = foldl applyTypeToArg op_ty args
applyTypeToArg op_ty (TyArg ty) = applyTy op_ty ty
applyTypeToArg op_ty (UsageArg _) = panic "applyTypeToArg: UsageArg"
-applyTypeToArg op_ty val_or_lit_arg = case (getFunTy_maybe op_ty) of
+applyTypeToArg op_ty val_or_lit_arg = case (getFunTyExpandingDicts_maybe False{-no peeking-} op_ty) of
Just (_, res_ty) -> res_ty
\end{code}
diff --git a/ghc/compiler/coreSyn/NmbrCore.lhs b/ghc/compiler/coreSyn/NmbrCore.lhs
new file mode 100644
index 0000000000..f00208d037
--- /dev/null
+++ b/ghc/compiler/coreSyn/NmbrCore.lhs
@@ -0,0 +1,163 @@
+%
+% (c) The AQUA Project, Glasgow University, 1996
+%
+\section[NmbrCore]{Renumber Core for printing}
+
+\begin{code}
+#include "HsVersions.h"
+
+module NmbrCore where
+
+IMP_Ubiq(){-uitous-}
+
+import PprEnv ( NmbrEnv )
+\end{code}
+
+\begin{code}
+nmbrCoreBindings :: [CoreBinding] -> NmbrEnv -> (NmbrEnv, [CoreBinding])
+
+nmbr_bind :: CoreBinding -> NmbrEnv -> (NmbrEnv, CoreBinding)
+nmbr_expr :: CoreExpr -> NmbrEnv -> (NmbrEnv, CoreExpr)
+nmbr_arg :: CoreArg -> NmbrEnv -> (NmbrEnv, CoreArg)
+
+nmbrCoreBindings nenv [] = (nenv, [])
+nmbrCoreBindings nenv (b:bs)
+ = let
+ (new_nenv, new_b) = nmbr_bind nenv b
+ (fin_nenv, new_bs) = nmbrCoreBindings new_nenv bs
+ in
+ (fin_nenv, new_b : new_bs)
+
+nmbr_bind nenv (NonRec binder rhs)
+ -- remember, binder cannot appear in rhs
+ = let
+ (_, new_rhs) = nmbr_expr nenv rhs
+ (nenv2, new_binder) = addId nenv binder
+ in
+ (nenv2, NonRec new_binder new_rhs)
+
+nmbr_bind nenv (Rec binds)
+ = -- for letrec, we plug in new bindings BEFORE cloning rhss
+ let
+ (binders, rhss) = unzip binds
+
+ (nenv2, new_binders) = mapAccumL addId nenv binders
+
+ (_, new_rhss) = mapAndUnzip (nmbr_expr nenv2) rhss
+ in
+ returnUs (nenv2, Rec (zipEqual "nmbr_bind" new_binders new_rhss))
+\end{code}
+
+\begin{code}
+nmbr_arg nenv (VarArg v)
+ = let
+ (nenv2, new_v) = nmbrId nenv v
+ in
+ (nenv2, VarArg new_v)
+
+nmbr_arg nenv (TyArg ty)
+ = let
+ (nenv2, new_ty) = nmbrType nenv ty
+ in
+ (nenv2, TyArg new_ty)
+
+nmbr_arg nenv (UsageArg use)
+ = let
+ (nenv2, new_use) = nmbrUsage nenv use
+ in
+ (nenv2, UsageArg new_use)
+\end{code}
+
+\begin{code}
+nmbr_expr :: NmbrEnv
+ -> TypeEnv
+ -> CoreExpr
+ -> UniqSM CoreExpr
+
+nmbr_expr nenv tenv orig_expr@(Var var)
+ = returnUs (
+ case (lookupIdEnv nenv var) of
+ Nothing -> --false:ASSERT(toplevelishId var) (SIGH)
+ orig_expr
+ Just expr -> expr
+ )
+
+nmbr_expr nenv tenv e@(Lit _) = returnUs e
+
+nmbr_expr nenv tenv (Con con as)
+ = mapUs (nmbr_arg nenv tenv) as `thenUs` \ new_as ->
+ mkCoCon con new_as
+
+nmbr_expr nenv tenv (Prim op as)
+ = mapUs (nmbr_arg nenv tenv) as `thenUs` \ new_as ->
+ do_PrimOp op `thenUs` \ new_op ->
+ mkCoPrim new_op new_as
+ where
+ do_PrimOp (CCallOp label is_asm may_gc arg_tys result_ty)
+ = let
+ new_arg_tys = map (applyTypeEnvToTy tenv) arg_tys
+ new_result_ty = applyTypeEnvToTy tenv result_ty
+ in
+ returnUs (CCallOp label is_asm may_gc new_arg_tys new_result_ty)
+
+ do_PrimOp other_op = returnUs other_op
+
+nmbr_expr nenv tenv (Lam binder expr)
+ = dup_binder tenv binder `thenUs` \(new_binder, (old,new)) ->
+ let new_nenv = addOneToIdEnv nenv old new in
+ nmbr_expr new_nenv tenv expr `thenUs` \ new_expr ->
+ returnUs (Lam new_binder new_expr)
+
+nmbr_expr nenv tenv (App expr arg)
+ = nmbr_expr nenv tenv expr `thenUs` \ new_expr ->
+ nmbr_arg nenv tenv arg `thenUs` \ new_arg ->
+ mkCoApps new_expr [new_arg] -- ToDo: more efficiently?
+
+nmbr_expr nenv tenv (Case expr alts)
+ = nmbr_expr nenv tenv expr `thenUs` \ new_expr ->
+ do_alts nenv tenv alts `thenUs` \ new_alts ->
+ returnUs (Case new_expr new_alts)
+ where
+ do_alts nenv tenv (AlgAlts alts deflt)
+ = mapUs (do_boxed_alt nenv tenv) alts `thenUs` \ new_alts ->
+ do_default nenv tenv deflt `thenUs` \ new_deflt ->
+ returnUs (AlgAlts new_alts new_deflt)
+ where
+ do_boxed_alt nenv tenv (con, binders, expr)
+ = mapAndUnzipUs (dup_binder tenv) binders `thenUs` \ (new_binders, new_vmaps) ->
+ let new_nenv = growIdEnvList nenv new_vmaps in
+ nmbr_expr new_nenv tenv expr `thenUs` \ new_expr ->
+ returnUs (con, new_binders, new_expr)
+
+
+ do_alts nenv tenv (PrimAlts alts deflt)
+ = mapUs (do_unboxed_alt nenv tenv) alts `thenUs` \ new_alts ->
+ do_default nenv tenv deflt `thenUs` \ new_deflt ->
+ returnUs (PrimAlts new_alts new_deflt)
+ where
+ do_unboxed_alt nenv tenv (lit, expr)
+ = nmbr_expr nenv tenv expr `thenUs` \ new_expr ->
+ returnUs (lit, new_expr)
+
+ do_default nenv tenv NoDefault = returnUs NoDefault
+
+ do_default nenv tenv (BindDefault binder expr)
+ = dup_binder tenv binder `thenUs` \ (new_binder, (old, new)) ->
+ let new_nenv = addOneToIdEnv nenv old new in
+ nmbr_expr new_nenv tenv expr `thenUs` \ new_expr ->
+ returnUs (BindDefault new_binder new_expr)
+
+nmbr_expr nenv tenv (Let core_bind expr)
+ = nmbr_bind nenv tenv core_bind `thenUs` \ (new_bind, new_nenv) ->
+ -- and do the body of the let
+ nmbr_expr new_nenv tenv expr `thenUs` \ new_expr ->
+ returnUs (Let new_bind new_expr)
+
+nmbr_expr nenv tenv (SCC label expr)
+ = nmbr_expr nenv tenv expr `thenUs` \ new_expr ->
+ returnUs (SCC label new_expr)
+
+nmbr_expr nenv tenv (Coerce c ty expr)
+ = nmbr_expr nenv tenv expr `thenUs` \ new_expr ->
+ returnUs (Coerce c ty new_expr)
+\end{code}
diff --git a/ghc/compiler/deSugar/DsExpr.lhs b/ghc/compiler/deSugar/DsExpr.lhs
index 4f2760ec47..cf1cf58d5c 100644
--- a/ghc/compiler/deSugar/DsExpr.lhs
+++ b/ghc/compiler/deSugar/DsExpr.lhs
@@ -424,7 +424,7 @@ dsExpr (RecordUpdOut record_expr dicts rbinds)
dsRbinds rbinds $ \ rbinds' ->
let
record_ty = coreExprType record_expr'
- (tycon, inst_tys, cons) = trace "DsExpr.getAppDataTyConExpandingDicts" $
+ (tycon, inst_tys, cons) = --trace "DsExpr.getAppDataTyConExpandingDicts" $
getAppDataTyConExpandingDicts record_ty
cons_to_upd = filter has_all_fields cons
diff --git a/ghc/compiler/hsSyn/HsMatches.lhs b/ghc/compiler/hsSyn/HsMatches.lhs
index 5800e5e62f..059db6a251 100644
--- a/ghc/compiler/hsSyn/HsMatches.lhs
+++ b/ghc/compiler/hsSyn/HsMatches.lhs
@@ -120,7 +120,8 @@ pprMatch sty is_case first_match
= ([], pprGRHSsAndBinds sty is_case grhss_n_binds)
ppr_match sty is_case (SimpleMatch expr)
- = ([], ppr sty expr)
+ = ([], ppHang (ppStr (if is_case then "->" else "="))
+ 4 (ppr sty expr))
----------------------------------------------------------
diff --git a/ghc/compiler/nativeGen/StixInteger.lhs b/ghc/compiler/nativeGen/StixInteger.lhs
index a019c52140..ef901f0808 100644
--- a/ghc/compiler/nativeGen/StixInteger.lhs
+++ b/ghc/compiler/nativeGen/StixInteger.lhs
@@ -25,7 +25,7 @@ import PrimOp ( PrimOp(..) )
import PrimRep ( PrimRep(..) )
import SMRep ( SMRep(..), SMSpecRepKind, SMUpdateKind )
import Stix ( getUniqLabelNCG, sStLitLbl, stgHp, stgHpLim,
- StixTree(..), StixTreeList(..),
+ StixTree(..), SYN_IE(StixTreeList),
CodeSegment, StixReg
)
import StixMacro ( macroCode, heapCheck )
diff --git a/ghc/compiler/prelude/PrelVals.lhs b/ghc/compiler/prelude/PrelVals.lhs
index 37d6f6b746..84fd4d915a 100644
--- a/ghc/compiler/prelude/PrelVals.lhs
+++ b/ghc/compiler/prelude/PrelVals.lhs
@@ -109,6 +109,9 @@ openAlphaTy = mkTyVarTy openAlphaTyVar
errorTy :: Type
errorTy = mkSigmaTy [openAlphaTyVar] [] (mkFunTys [mkListTy charTy] openAlphaTy)
+ -- Notice the openAlphaTyVar. It says that "error" can be applied
+ -- to unboxed as well as boxed types. This is OK because it never
+ -- returns, so the return type is irrelevant.
\end{code}
We want \tr{GHCbase.trace} to be wired in
diff --git a/ghc/compiler/prelude/PrimOp.lhs b/ghc/compiler/prelude/PrimOp.lhs
index 413bdf70f2..1e62e9c326 100644
--- a/ghc/compiler/prelude/PrimOp.lhs
+++ b/ghc/compiler/prelude/PrimOp.lhs
@@ -1345,7 +1345,7 @@ primOpInfo ErrorIOPrimOp -- errorIO# :: PrimIO () -> State# RealWorld#
primOpInfo (CCallOp _ _ _ arg_tys result_ty)
= AlgResult SLIT("ccall#") [] arg_tys result_tycon tys_applied
where
- (result_tycon, tys_applied, _) = -- trace "PrimOp.getAppDataTyConExpandingDicts" $
+ (result_tycon, tys_applied, _) = --trace "PrimOp.getAppDataTyConExpandingDicts" $
getAppDataTyConExpandingDicts result_ty
#ifdef DEBUG
diff --git a/ghc/compiler/rename/Rename.lhs b/ghc/compiler/rename/Rename.lhs
index 2d8bd92945..54348b99c8 100644
--- a/ghc/compiler/rename/Rename.lhs
+++ b/ghc/compiler/rename/Rename.lhs
@@ -14,7 +14,7 @@ IMP_Ubiq()
IMPORT_1_3(List(partition))
import HsSyn
-import RdrHsSyn ( RdrNameHsModule(..), RdrNameImportDecl(..) )
+import RdrHsSyn ( SYN_IE(RdrNameHsModule), SYN_IE(RdrNameImportDecl) )
import RnHsSyn ( RnName(..){-.. is for Ix hack only-}, SYN_IE(RenamedHsModule), isRnTyConOrClass, isRnWired )
--ToDo:rm: all for debugging only
diff --git a/ghc/compiler/rename/RnNames.lhs b/ghc/compiler/rename/RnNames.lhs
index f7879502d7..28cd29aeaf 100644
--- a/ghc/compiler/rename/RnNames.lhs
+++ b/ghc/compiler/rename/RnNames.lhs
@@ -389,7 +389,10 @@ doImportDecls iface_cache g_info us src_imps
rec_imp_fn :: Name -> (ExportFlag, [SrcLoc])
rec_imp_fn n = case lookupUFM rec_imp_fm n of
- Nothing -> panic "RnNames:rec_imp_fn"
+ Nothing -> (NotExported,[mkBuiltinSrcLoc])
+ -- panic "RnNames:rec_imp_fn"
+ -- but the panic can show up
+ -- in error messages
Just (flag, locns) -> (flag, bagToList locns)
i_info = (g_info, emptyFM, emptyFM, rec_imp_fn)
diff --git a/ghc/compiler/simplCore/BinderInfo.lhs b/ghc/compiler/simplCore/BinderInfo.lhs
index 43aa0bd7dc..9b44d2ee41 100644
--- a/ghc/compiler/simplCore/BinderInfo.lhs
+++ b/ghc/compiler/simplCore/BinderInfo.lhs
@@ -16,7 +16,7 @@ module BinderInfo (
inlineUnconditionally, okToInline,
- addBinderInfo, orBinderInfo,
+ addBinderInfo, orBinderInfo, andBinderInfo,
argOccurrence, funOccurrence, dangerousArgOcc, noBinderInfo,
markMany, markDangerousToDup, markInsideSCC,
@@ -117,20 +117,24 @@ okToInline BottomForm occ_info small_enough
-- This used to be checked for, but I can't
-- see why so I've left it out.
--- Non-WHNFs can be inlined if they occur once, or are small
-okToInline OtherForm (OneOcc _ _ _ n_alts _) small_enough | n_alts <= 1 = True
-okToInline OtherForm any_occ small_enough = small_enough
-
--- A WHNF can be inlined if it doesn't occur inside a lambda,
+-- A WHNF can be inlined if it occurs once, or is small
+okToInline form occ_info small_enough
+ | is_whnf_form form
+ = small_enough || one_occ
+ where
+ one_occ = case occ_info of
+ OneOcc _ _ _ n_alts _ -> n_alts <= 1
+ other -> False
+
+ is_whnf_form VarForm = True
+ is_whnf_form ValueForm = True
+ is_whnf_form other = False
+
+-- A non-WHNF can be inlined if it doesn't occur inside a lambda,
-- and occurs exactly once or
-- occurs once in each branch of a case and is small
-okToInline form (OneOcc _ NoDupDanger _ n_alts _) small_enough
- = is_whnf_form form &&
- (n_alts <= 1 || small_enough)
- where
- is_whnf_form VarForm = True
- is_whnf_form ValueForm = True
- is_whnf_form other = False
+okToInline OtherForm (OneOcc _ NoDupDanger _ n_alts _) small_enough
+ = n_alts <= 1 || small_enough
okToInline form any_occ small_enough = False
\end{code}
@@ -194,49 +198,55 @@ addBinderInfo info1 DeadCode = info1
addBinderInfo info1 info2
= ManyOcc (min (getBinderInfoArity info1) (getBinderInfoArity info2))
--- (orBinderInfo orig new) is used in two situations:
--- First, it combines occurrence info from branches of a case
---
--- Second, when a variable whose occurrence
--- info is currently "orig" is bound to a variable whose occurrence info is "new"
--- eg (\new -> e) orig
--- What we want to do is to *worsen* orig's info to take account of new's
+-- (orBinderInfo orig new) is used when combining occurrence
+-- info from branches of a case
orBinderInfo DeadCode info2 = info2
orBinderInfo info1 DeadCode = info1
orBinderInfo (OneOcc posn1 dup1 scc1 n_alts1 ar_1)
- (OneOcc posn2 dup2 scc2 n_alts2 ar_2)
+ (OneOcc posn2 dup2 scc2 n_alts2 ar_2)
= OneOcc (combine_posns posn1 posn2)
(combine_dups dup1 dup2)
(combine_sccs scc1 scc2)
(n_alts1 + n_alts2)
(min ar_1 ar_2)
- where
- combine_dups DupDanger _ = DupDanger -- Too paranoid?? ToDo
- combine_dups _ DupDanger = DupDanger
- combine_dups _ _ = NoDupDanger
-
- combine_sccs InsideSCC _ = InsideSCC -- Too paranoid?? ToDo
- combine_sccs _ InsideSCC = InsideSCC
- combine_sccs _ _ = NotInsideSCC
-
orBinderInfo info1 info2
= ManyOcc (min (getBinderInfoArity info1) (getBinderInfoArity info2))
+-- (andBinderInfo orig new) is used in two situations:
+-- First, when a variable whose occurrence info
+-- is currently "orig" is bound to a variable whose occurrence info is "new"
+-- eg (\new -> e) orig
+-- What we want to do is to *worsen* orig's info to take account of new's
+--
+-- second, when completing a let-binding
+-- let new = ...orig...
+-- we compute the way orig occurs in (...orig...), and then use orBinderInfo
+-- to worsen this info by the way new occurs in the let body; then we use
+-- that to worsen orig's currently recorded occurrence info.
+
+andBinderInfo DeadCode info2 = DeadCode
+andBinderInfo info1 DeadCode = DeadCode
+andBinderInfo (OneOcc posn1 dup1 scc1 n_alts1 ar_1)
+ (OneOcc posn2 dup2 scc2 n_alts2 ar_2)
+ = OneOcc (combine_posns posn1 posn2)
+ (combine_dups dup1 dup2)
+ (combine_sccs scc1 scc2)
+ (n_alts1 + n_alts2)
+ ar_1 -- Min arity just from orig
+andBinderInfo info1 info2 = ManyOcc (getBinderInfoArity info1)
+
+
combine_posns FunOcc FunOcc = FunOcc -- see comment at FunOrArg defn
combine_posns _ _ = ArgOcc
-{-
-multiplyBinderInfo orig@(ManyOcc _) new
- = ManyOcc (min (getBinderInfoArity orig) (getBinderInfoArity new))
-
-multiplyBinderInfo orig new@(ManyOcc _)
- = ManyOcc (min (getBinderInfoArity orig) (getBinderInfoArity new))
+combine_dups DupDanger _ = DupDanger -- Too paranoid?? ToDo
+combine_dups _ DupDanger = DupDanger
+combine_dups _ _ = NoDupDanger
-multiplyBinderInfo (OneOcc posn1 dup1 scc1 n_alts1 ar_1)
- (OneOcc posn2 dup2 scc2 n_alts2 ar_2)
- = OneOcc (combine_posns posn1 posn2) ???
--}
+combine_sccs InsideSCC _ = InsideSCC -- Too paranoid?? ToDo
+combine_sccs _ InsideSCC = InsideSCC
+combine_sccs _ _ = NotInsideSCC
setBinderInfoArityToZero :: BinderInfo -> BinderInfo
setBinderInfoArityToZero DeadCode = DeadCode
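[Editorial note, not part of the patch.] The rearranged okToInline above makes the two inlining rules explicit: WHNFs (variables and values) inline when they are small or used once; other expressions only when duplication is safe. A simplified standalone model of that decision, with toy stand-ins for FormSummary and BinderInfo:

\begin{code}
module OkToInlineSketch where

-- Toy stand-ins, not GHC's FormSummary or BinderInfo
data Form = VarForm | ValueForm | OtherForm
data Occ  = DeadCode
          | OneOcc { underLambda :: Bool, numAlts :: Int }
          | ManyOcc

oneOcc :: Occ -> Bool
oneOcc (OneOcc _ n) = n <= 1
oneOcc _            = False

okToInline :: Form -> Occ -> Bool -> Bool
-- A WHNF (variable or value form) may be inlined if it is small enough,
-- or occurs at most once
okToInline VarForm   occ smallEnough = smallEnough || oneOcc occ
okToInline ValueForm occ smallEnough = smallEnough || oneOcc occ
-- A non-WHNF may be inlined only when it is not under a lambda, and it
-- either occurs in at most one case branch or is small enough
okToInline OtherForm (OneOcc False nAlts) smallEnough = nAlts <= 1 || smallEnough
-- Everything else stays put
okToInline _ _ _ = False
\end{code}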
diff --git a/ghc/compiler/simplCore/FoldrBuildWW.lhs b/ghc/compiler/simplCore/FoldrBuildWW.lhs
index a3e559d48c..19ec58c8f9 100644
--- a/ghc/compiler/simplCore/FoldrBuildWW.lhs
+++ b/ghc/compiler/simplCore/FoldrBuildWW.lhs
@@ -149,9 +149,7 @@ try_split_bind id expr =
worker_ty = mkForallTy (templ ++ [alphaTyVar])
(foldr mkFunTy n_ty_templ (arg_tys++[c_ty_templ,n_ty_templ]))
- wrapper_id = id `replaceIdInfo`
- (getIdInfo id `addInfo_UF`
- iWantToBeINLINEd UnfoldAlways)
+ wrapper_id = addInlinePragma id
worker_id = mkWorkerId worker_new_uq id worker_ty
noIdInfo
-- TODO : CHECK if mkWorkerId is thr
diff --git a/ghc/compiler/simplCore/MagicUFs.lhs b/ghc/compiler/simplCore/MagicUFs.lhs
index 79f659ecfa..77e43ae687 100644
--- a/ghc/compiler/simplCore/MagicUFs.lhs
+++ b/ghc/compiler/simplCore/MagicUFs.lhs
@@ -16,6 +16,7 @@ module MagicUFs (
IMP_Ubiq(){-uitous-}
IMPORT_DELOOPER(IdLoop) -- paranoia checking
+import Id ( addInlinePragma )
import CoreSyn
import SimplEnv ( SimplEnv )
import SimplMonad ( SYN_IE(SmplM), SimplCount )
@@ -446,11 +447,11 @@ foldl_fun env (TypeArg ty1:TypeArg ty2:ValArg arg_k:ValArg arg_z:ValArg arg_list
t] ->
let
- c = addIdUnfolding pre_c (iWantToBeINLINEd UnfoldAlways)
+ c = addInlinePragma pre_c
c_rhs = Lam b (Lam g' (Lam a
(Let (NonRec t (App (App (argToExpr arg_k) (VarArg a)) (VarArg b)))
(App (Var g') (VarArg t)))))
- n = addIdUnfolding pre_n (iWantToBeINLINEd UnfoldAlways)
+ n = addInlinePragma pre_n
n_rhs = Lam a' (Var a')
in
returnSmpl (Let (NonRec c c_rhs) $
@@ -489,13 +490,13 @@ foldl_fun env (TypeArg ty1:TypeArg ty2:ValArg arg_k:ValArg arg_z:ValArg arg_list
t] ->
let
- c = addIdUnfolding pre_c (iWantToBeINLINEd UnfoldAlways)
+ c = addInlinePragma pre_c
c_rhs = Lam b (Lam g_ (Lam a
(Let (NonRec t (App (App (argToExpr arg_k) (VarArg a)) (VarArg b)))
(App (Var g_) (VarArg t)))))
- n = addIdUnfolding pre_n (iWantToBeINLINEd UnfoldAlways)
+ n = addInlinePragma pre_n
n_rhs = Lam a' (Var a')
- r = addIdUnfolding pre_r (iWantToBeINLINEd UnfoldAlways)
+ r = addInlinePragma pre_r
r_rhs = mkGenApp (Var foldrId)
[TypeArg ty2,TypeArg (mkFunTys [ty1] ty1),
ValArg (VarArg c),
diff --git a/ghc/compiler/simplCore/SimplCase.lhs b/ghc/compiler/simplCore/SimplCase.lhs
index 786f723ca6..4318ec56ca 100644
--- a/ghc/compiler/simplCore/SimplCase.lhs
+++ b/ghc/compiler/simplCore/SimplCase.lhs
@@ -511,7 +511,8 @@ simplAlts env scrut (AlgAlts alts deflt) rhs_c
new_env = case scrut of
Var v -> extendEnvGivenNewRhs env1 v (Con con args)
where
- (_, ty_args, _) = getAppDataTyConExpandingDicts (idType v)
+ (_, ty_args, _) = --trace "SimplCase.getAppData..." $
+ getAppDataTyConExpandingDicts (idType v)
args = map TyArg ty_args ++ map VarArg con_args'
other -> env1
@@ -775,7 +776,8 @@ mkCoCase env scrut (AlgAlts outer_alts
v | scrut_is_var = Var scrut_var
| otherwise = Con con (map TyArg arg_tys ++ map VarArg args)
- arg_tys = case (getAppDataTyConExpandingDicts (idType deflt_var)) of
+ arg_tys = --trace "SimplCase:getAppData...:2" $
+ case (getAppDataTyConExpandingDicts (idType deflt_var)) of
(_, arg_tys, _) -> arg_tys
mkCoCase env scrut (PrimAlts
diff --git a/ghc/compiler/simplCore/SimplEnv.lhs b/ghc/compiler/simplCore/SimplEnv.lhs
index f984764279..b2be6a1510 100644
--- a/ghc/compiler/simplCore/SimplEnv.lhs
+++ b/ghc/compiler/simplCore/SimplEnv.lhs
@@ -47,7 +47,7 @@ IMP_Ubiq(){-uitous-}
IMPORT_DELOOPER(SmplLoop) -- breaks the MagicUFs / SimplEnv loop
-import BinderInfo ( orBinderInfo, noBinderInfo,
+import BinderInfo ( orBinderInfo, andBinderInfo, noBinderInfo,
BinderInfo(..){-instances, too-}, FunOrArg, DuplicationDanger, InsideSCC
)
import CgCompInfo ( uNFOLDING_CREATION_THRESHOLD )
@@ -76,7 +76,7 @@ import PprCore -- various instances
import PprStyle ( PprStyle(..) )
import PprType ( GenType, GenTyVar )
import Pretty
-import Type ( eqTy, getAppDataTyConExpandingDicts, applyTypeEnvToTy )
+import Type ( eqTy, applyTypeEnvToTy )
import TyVar ( nullTyVarEnv, addOneToTyVarEnv, growTyVarEnvList,
SYN_IE(TyVarEnv), GenTyVar{-instance Eq-}
)
@@ -424,9 +424,14 @@ extendEnvGivenBinding env@(SimplEnv chkr encl_cc ty_env in_id_env out_id_env con
-- If the out_id is already in the OutIdEnv, then just replace the
-- unfolding, leaving occurrence info alone (this must then
-- be a call via extendEnvGivenNewRhs).
- out_id_env_with_unfolding = foldl modifyOccInfo env1 (ufmToList fv_occ_info)
+ out_id_env_with_unfolding = foldl modifyOccInfo env1 full_fv_occ_info
+ -- full_fv_occ_info combines the occurrence of the current binder
+ -- with the occurrences of its RHS's free variables.
+ full_fv_occ_info = [ (uniq, fv_occ `andBinderInfo` occ_info)
+ | (uniq,fv_occ) <- ufmToList fv_occ_info
+ ]
env1 = addToUFM_C modifyOutEnvItem out_id_env out_id
- (out_id, occ_info, OutUnfolding unf_cc unfolding)
+ (out_id, occ_info, rhs_info)
-- Occurrence-analyse the RHS
-- The "interesting" free variables we want occurrence info for are those
@@ -435,16 +440,10 @@ extendEnvGivenBinding env@(SimplEnv chkr encl_cc ty_env in_id_env out_id_env con
interesting_fvs = mkIdSet [id | (id,OneOcc _ _ _ _ _,_) <- eltsUFM out_id_env]
-- Compute unfolding details
- unfolding = SimpleUnfolding form_summary guidance template
+ rhs_info = OutUnfolding unf_cc (SimpleUnfolding form_summary guidance template)
form_summary = mkFormSummary rhs
- guidance | not (switchIsSet env IgnoreINLINEPragma) && idWantsToBeINLINEd out_id
- = UnfoldAlways
-
- | otherwise
- = calcUnfoldingGuidance True{-sccs OK-} bOMB_OUT_SIZE rhs
-
- bOMB_OUT_SIZE = getSimplIntSwitch chkr SimplUnfoldingCreationThreshold
+ guidance = mkSimplUnfoldingGuidance chkr out_id rhs
-- Compute cost centre for thing
unf_cc | noCostCentreAttached expr_cc = encl_cc
@@ -452,29 +451,63 @@ extendEnvGivenBinding env@(SimplEnv chkr encl_cc ty_env in_id_env out_id_env con
where
expr_cc = coreExprCc rhs
+{- We need to be pretty careful when extending
+ the environment with RHS info in recursive groups.
+
+Here's a nasty example:
+
+ letrec r = f x
+ t = r
+ x = ...t...
+ in
+ ...t...
+
+Here, r occurs exactly once, so we may reasonably inline r in t's RHS.
+But the pre-simplified t's rhs is an atom, r, so we may also decide to
+inline t everywhere. But if we do *both* these reasonable things we get
+
+ letrec r = f x
+ t = f x
+ x = ...r...
+ in
+ ...t...
+
+(The t in the body doesn't get inlined because by the time the recursive
+group is done we see that t's RHS isn't an atom.)
+
+Bad news! (f x) is duplicated! Our solution is to only be prepared to
+inline RHSs in their own RHSs if they are *values* (lambda or constructor).
+
+This means that silly x=y bindings in recursive group will never go away. Sigh. ToDo!
+-}
+
extendEnvForRecBinding env@(SimplEnv chkr encl_cc ty_env in_id_env out_id_env con_apps)
(out_id, ((_,occ_info), old_rhs))
= SimplEnv chkr encl_cc ty_env in_id_env new_out_id_env con_apps
where
- new_out_id_env = case guidance of
- UnfoldNever -> out_id_env -- No new stuff to put in
- other -> out_id_env_with_unfolding
+ new_out_id_env = case (form_summary, guidance) of
+ (ValueForm, UnfoldNever) -> out_id_env -- No new stuff to put in
+ (ValueForm, _) -> out_id_env_with_unfolding
+ other -> out_id_env -- Not a value
-- If there is an unfolding, we add rhs-info for out_id,
-- No need to modify occ info because RHS is pre-simplification
out_id_env_with_unfolding = addOneToIdEnv out_id_env out_id
- (out_id, occ_info, InUnfolding env unfolding)
+ (out_id, occ_info, rhs_info)
-- Compute unfolding details
- unfolding = SimpleUnfolding form_summary guidance old_rhs
+ rhs_info = InUnfolding env (SimpleUnfolding form_summary guidance old_rhs)
form_summary = mkFormSummary old_rhs
+ guidance = mkSimplUnfoldingGuidance chkr out_id (unTagBinders old_rhs)
- guidance | not (switchIsSet env IgnoreINLINEPragma) && idWantsToBeINLINEd out_id
- = UnfoldAlways
- | otherwise
- = calcUnfoldingGuidance True{-sccs OK-} bOMB_OUT_SIZE (unTagBinders old_rhs)
+mkSimplUnfoldingGuidance chkr out_id rhs
+ | not (switchIsOn chkr IgnoreINLINEPragma) && idWantsToBeINLINEd out_id
+ = UnfoldAlways
+ | otherwise
+ = calcUnfoldingGuidance True{-sccs OK-} bOMB_OUT_SIZE rhs
+ where
bOMB_OUT_SIZE = getSimplIntSwitch chkr SimplUnfoldingCreationThreshold
extendEnvGivenRhsInfo :: SimplEnv -> OutId -> BinderInfo -> RhsInfo -> SimplEnv
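[Editorial note, not part of the patch.] The long letrec comment above ends with the rule that extendEnvForRecBinding now implements: inside a recursive group, record an unfolding only for a binder whose RHS is already a value. A toy rendering of that filter, with an invented expression type:

\begin{code}
module RecUnfoldSketch where

-- A toy expression type, just enough to classify right-hand sides
data Expr = Var String
          | App Expr Expr
          | Lam String Expr
          | Con String [Expr]

-- Values are the only things safe to inline inside the other RHSs of the
-- same recursive group (lambdas and saturated constructor applications)
isValue :: Expr -> Bool
isValue (Lam _ _) = True
isValue (Con _ _) = True
isValue _         = False

-- Keep only the (binder, rhs) pairs of a letrec that may be recorded as
-- unfoldings for use within the group itself
recUnfoldings :: [(String, Expr)] -> [(String, Expr)]
recUnfoldings pairs = [ p | p@(_, rhs) <- pairs, isValue rhs ]

main :: IO ()
main = print (map fst (recUnfoldings
         [ ("r", App (Var "f") (Var "x"))   -- not a value: excluded
         , ("t", Var "r")                   -- not a value: excluded (the x=y case)
         , ("k", Lam "y" (Var "y")) ]))     -- a value: kept
\end{code}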
diff --git a/ghc/compiler/simplCore/SimplVar.lhs b/ghc/compiler/simplCore/SimplVar.lhs
index 4e4ef5582a..2a6499e4c9 100644
--- a/ghc/compiler/simplCore/SimplVar.lhs
+++ b/ghc/compiler/simplCore/SimplVar.lhs
@@ -37,7 +37,6 @@ import Pretty ( ppBesides, ppStr )
import SimplEnv
import SimplMonad
import TyCon ( tyConFamilySize )
-import Type ( isPrimType, getAppDataTyConExpandingDicts, maybeAppDataTyConExpandingDicts )
import Util ( pprTrace, assertPanic, panic )
import Maybes ( maybeToBool )
\end{code}
diff --git a/ghc/compiler/simplCore/Simplify.lhs b/ghc/compiler/simplCore/Simplify.lhs
index f1ac5d87f8..2141e078cd 100644
--- a/ghc/compiler/simplCore/Simplify.lhs
+++ b/ghc/compiler/simplCore/Simplify.lhs
@@ -22,13 +22,14 @@ import CoreUtils ( coreExprType, nonErrorRHSs, maybeErrorApp,
unTagBinders, squashableDictishCcExpr
)
import Id ( idType, idWantsToBeINLINEd,
+ externallyVisibleId,
getIdDemandInfo, addIdDemandInfo,
GenId{-instance NamedThing-}
)
import IdInfo ( willBeDemanded, DemandInfo )
import Literal ( isNoRepLit )
import Maybes ( maybeToBool )
-import Name ( isLocallyDefined )
+--import Name ( isExported )
import PprStyle ( PprStyle(..) )
import PprType ( GenType{-instance Outputable-} )
import Pretty ( ppAbove )
@@ -193,8 +194,8 @@ simplTopBinds env [] = returnSmpl []
simplTopBinds env (NonRec binder@(in_id,occ_info) rhs : binds)
= -- No cloning necessary at top level
-- Process the binding
- simplRhsExpr env binder rhs `thenSmpl` \ rhs' ->
- completeNonRec True env binder rhs' `thenSmpl` \ (new_env, binds1') ->
+ simplRhsExpr env binder rhs `thenSmpl` \ rhs' ->
+ completeNonRec env binder in_id rhs' `thenSmpl` \ (new_env, binds1') ->
-- Process the other bindings
simplTopBinds new_env binds `thenSmpl` \ binds2' ->
@@ -774,7 +775,8 @@ simplBind env (NonRec binder@(id,occ_info) rhs) body_c body_ty
complete_bind env rhs
= simplRhsExpr env binder rhs `thenSmpl` \ rhs' ->
- completeNonRec False env binder rhs' `thenSmpl` \ (new_env, binds) ->
+ cloneId env binder `thenSmpl` \ new_id ->
+ completeNonRec env binder new_id rhs' `thenSmpl` \ (new_env, binds) ->
body_c new_env `thenSmpl` \ body' ->
returnSmpl (mkCoLetsAny binds body')
@@ -1060,63 +1062,64 @@ x. That's just what completeLetBinding does.
\begin{code}
- -- Sigh: rather disgusting case for coercions. We want to
- -- ensure that all let-bound Coerces have atomic bodies, so
- -- they can freely be inlined.
-completeNonRec top_level env binder@(_,occ_info) (Coerce coercion ty rhs)
- = (case rhs of
- Var v -> returnSmpl (env, [], rhs)
- Lit l -> returnSmpl (env, [], rhs)
- other -> newId (coreExprType rhs) `thenSmpl` \ inner_id ->
- completeNonRec top_level env
- (inner_id, dangerousArgOcc) rhs `thenSmpl` \ (env1, extra_bind) ->
- -- Dangerous occ because, like constructor args,
- -- it can be duplicated easily
- let
- atomic_rhs = case lookupId env1 inner_id of
- LitArg l -> Lit l
- VarArg v -> Var v
- in
- returnSmpl (env1, extra_bind, atomic_rhs)
- ) `thenSmpl` \ (env1, extra_bind, atomic_rhs) ->
- -- Tiresome to do all this, but we must treat the lit/var cases specially
- -- or we get a tick for atomic rhs when effectively it's a no-op.
-
- cloneId env1 binder `thenSmpl` \ new_id ->
- let
- new_rhs = Coerce coercion ty atomic_rhs
- env2 = extendIdEnvWithClone env1 binder new_id
- new_env = extendEnvGivenBinding env2 occ_info new_id new_rhs
- in
- returnSmpl (new_env, extra_bind ++ [NonRec new_id new_rhs])
-
-completeNonRec top_level env binder@(id,_) new_rhs
- -- See if RHS is an atom, or a reusable constructor
- | maybeToBool maybe_atomic_rhs
- = let
- new_env = extendIdEnvWithAtom env binder rhs_atom
- result_binds | top_level = [NonRec id new_rhs] -- Don't discard top-level bindings
- -- (they'll be dropped later if not
- -- exported and dead)
- | otherwise = []
- in
- tick atom_tick_type `thenSmpl_`
- returnSmpl (new_env, result_binds)
- where
- maybe_atomic_rhs = exprToAtom env new_rhs
- Just (rhs_atom, atom_tick_type) = maybe_atomic_rhs
-
-completeNonRec top_level env binder@(old_id,occ_info) new_rhs
- = (if top_level then
- returnSmpl old_id -- Only clone local binders
- else
- cloneId env binder
- ) `thenSmpl` \ new_id ->
+ -- We want to ensure that all let-bound Coerces have
+ -- atomic bodies, so they can freely be inlined.
+completeNonRec env binder new_id (Coerce coercion ty rhs)
+ | not (is_atomic rhs)
+ = newId (coreExprType rhs) `thenSmpl` \ inner_id ->
+ completeNonRec env
+ (inner_id, dangerousArgOcc) inner_id rhs `thenSmpl` \ (env1, binds1) ->
+ -- Dangerous occ because, like constructor args,
+ -- it can be duplicated easily
let
- env1 = extendIdEnvWithClone env binder new_id
- new_env = extendEnvGivenBinding env1 occ_info new_id new_rhs
+ atomic_rhs = case lookupId env1 inner_id of
+ LitArg l -> Lit l
+ VarArg v -> Var v
in
- returnSmpl (new_env, [NonRec new_id new_rhs])
+ completeNonRec env1 binder new_id
+ (Coerce coercion ty atomic_rhs) `thenSmpl` \ (env2, binds2) ->
+
+ returnSmpl (env2, binds1 ++ binds2)
+ where
+ is_atomic (Var v) = True
+ is_atomic (Lit l) = not (isNoRepLit l)
+ is_atomic other = False
+
+ -- Atomic right-hand sides.
+ -- We used to have a "tick AtomicRhs" in here, but it causes more trouble
+ -- than it's worth. For a top-level binding a = b, where a is exported,
+ -- we can't drop the binding, so we get repeated AtomicRhs ticks
+completeNonRec env binder new_id rhs@(Var v)
+ = returnSmpl (extendIdEnvWithAtom env binder (VarArg v), [NonRec new_id rhs])
+
+completeNonRec env binder new_id rhs@(Lit lit)
+ | not (isNoRepLit lit)
+ = returnSmpl (extendIdEnvWithAtom env binder (LitArg lit), [NonRec new_id rhs])
+
+ -- Right hand sides that are constructors
+ -- let v = C args
+ -- in
+ --- ...(let w = C same-args in ...)...
+ -- Then use v instead of w. This may save
+ -- re-constructing an existing constructor.
+completeNonRec env binder new_id rhs@(Con con con_args)
+ | switchIsSet env SimplReuseCon &&
+ maybeToBool maybe_existing_con &&
+ not (externallyVisibleId new_id) -- Don't bother for exported things
+ -- because we won't be able to drop
+ -- its binding.
+ = tick ConReused `thenSmpl_`
+ returnSmpl (extendIdEnvWithAtom env binder (VarArg it), [NonRec new_id rhs])
+ where
+ maybe_existing_con = lookForConstructor env con con_args
+ Just it = maybe_existing_con
+
+ -- Default case
+completeNonRec env binder@(id,occ_info) new_id rhs
+ = returnSmpl (new_env, [NonRec new_id rhs])
+ where
+ env1 = extendIdEnvWithClone env binder new_id
+ new_env = extendEnvGivenBinding env1 occ_info new_id rhs
\end{code}
%************************************************************************
@@ -1133,31 +1136,6 @@ simplArg env (TyArg ty) = TyArg (simplTy env ty)
simplArg env (VarArg id) = lookupId env id
\end{code}
-
-\begin{code}
-exprToAtom env (Var var)
- = Just (VarArg var, AtomicRhs)
-
-exprToAtom env (Lit lit)
- | not (isNoRepLit lit)
- = Just (LitArg lit, AtomicRhs)
-
-exprToAtom env (Con con con_args)
- | switchIsSet env SimplReuseCon
- -- Look out for
- -- let v = C args
- -- in
- --- ...(let w = C same-args in ...)...
- -- Then use v instead of w. This may save
- -- re-constructing an existing constructor.
- = case (lookForConstructor env con con_args) of
- Nothing -> Nothing
- Just var -> Just (VarArg var, ConReused)
-
-exprToAtom env other
- = Nothing
-\end{code}
-
%************************************************************************
%* *
\subsection[Simplify-quickies]{Some local help functions}
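[Editorial note, not part of the patch.] Among the completeNonRec cases introduced above, the constructor-reuse one is the most subtle: when binding w = C args and an identical v = C args is already in scope (and w is not externally visible), the simplifier substitutes v rather than rebuilding the constructor. A self-contained sketch of that lookup, with a plain association list standing in for the real environment:

\begin{code}
module ConReuseSketch where

data Atom = VarA String | LitA Int  deriving (Eq, Show)

-- Constructor applications already bound in scope, keyed by (con, args)
-- and giving the binder that holds them
type ConApps = [((String, [Atom]), String)]

-- If the same constructor application is already bound, reuse that binder.
lookForConstructor :: ConApps -> String -> [Atom] -> Maybe String
lookForConstructor env con args = lookup (con, args) env

main :: IO ()
main = do
  let env = [ (("Just", [VarA "x"]), "v") ]
  print (lookForConstructor env "Just" [VarA "x"])   -- Just "v": reuse v
  print (lookForConstructor env "Just" [LitA 3])     -- Nothing: keep new binding
\end{code}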
diff --git a/ghc/compiler/specialise/Specialise.lhs b/ghc/compiler/specialise/Specialise.lhs
index 424bcad5e4..8164e0ce05 100644
--- a/ghc/compiler/specialise/Specialise.lhs
+++ b/ghc/compiler/specialise/Specialise.lhs
@@ -1531,7 +1531,8 @@ specAlts (AlgAlts alts deflt) scrutinee_ty args
-- We use ty_args of scrutinee type to identify specialisation of
-- alternatives:
- (_, ty_args, _) = getAppDataTyConExpandingDicts scrutinee_ty
+ (_, ty_args, _) = --trace "Specialise.specAlts:getAppData..." $
+ getAppDataTyConExpandingDicts scrutinee_ty
specAlgAlt ty_args (con,binders,rhs)
= specLambdaOrCaseBody binders rhs args `thenSM` \ (binders, rhs, rhs_uds) ->
diff --git a/ghc/compiler/stranal/WorkWrap.lhs b/ghc/compiler/stranal/WorkWrap.lhs
index 8a8ff80c51..251b7b2027 100644
--- a/ghc/compiler/stranal/WorkWrap.lhs
+++ b/ghc/compiler/stranal/WorkWrap.lhs
@@ -11,12 +11,13 @@ module WorkWrap ( workersAndWrappers ) where
IMP_Ubiq(){-uitous-}
import CoreSyn
-import CoreUnfold ( Unfolding(..), UnfoldingGuidance(..) )
-IMPORT_DELOOPER(IdLoop) -- ToDo:rm when iWantToBeINLINEd goes
+import CoreUnfold ( Unfolding(..), UnfoldingGuidance(..), SimpleUnfolding )
+import MagicUFs ( MagicUnfoldingFun )
import CoreUtils ( coreExprType )
import Id ( idWantsToBeINLINEd, getIdStrictness, mkWorkerId,
- getIdInfo, replaceIdInfo, GenId
+ addIdStrictness, addInlinePragma,
+ GenId
)
import IdInfo ( noIdInfo, addInfo_UF, indicatesWorker,
mkStrictnessInfo, StrictnessInfo(..)
@@ -24,9 +25,6 @@ import IdInfo ( noIdInfo, addInfo_UF, indicatesWorker,
import SaLib
import UniqSupply ( returnUs, thenUs, mapUs, getUnique, SYN_IE(UniqSM) )
import WwLib
-
-iWantToBeINLINEd :: UnfoldingGuidance -> Unfolding
-iWantToBeINLINEd x = NoUnfolding --ToDo:panic "WorkWrap.iWantToBeINLINEd (ToDo)"
\end{code}
We take Core bindings whose binders have their strictness attached (by
@@ -240,12 +238,9 @@ tryWW fn_id rhs
-- worker Id:
mkStrictnessInfo args_info (Just worker_id)
- wrapper_id = fn_id `replaceIdInfo`
- (getIdInfo fn_id `addInfo`
- revised_strictness_info `addInfo_UF`
- iWantToBeINLINEd UnfoldAlways)
- -- NB! the "iWantToBeINLINEd" part adds an INLINE pragma to
- -- the wrapper, which is of course what we want.
+ wrapper_id = addInlinePragma (fn_id `addIdStrictness`
+ revised_strictness_info)
+ -- NB the "addInlinePragma" part; we want to inline wrappers everywhere
in
returnUs [ (worker_id, worker_rhs), -- worker comes first
(wrapper_id, wrapper_rhs) ] -- because wrapper mentions it
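The WorkWrap change above gives the wrapper its INLINE behaviour via addInlinePragma rather than the old iWantToBeINLINEd unfolding hack. As a hand-written illustration of the worker/wrapper split this module produces -- plain Haskell with hypothetical names, using seq in place of real unboxing -- the shape is:
\begin{code}
module WwSketch where

-- Original strict function:  f x = x * x + 1   (strict in x).
-- After the split, 'f' becomes a small wrapper that forces the argument and
-- calls the worker; inlining the wrapper at every call site exposes the
-- strictness there, while the worker keeps the real body out of line.

{-# INLINE f #-}
f :: Int -> Int
f x = x `seq` f_worker x          -- the wrapper (carries the INLINE pragma)

f_worker :: Int -> Int            -- the worker (carries the body)
f_worker x = x * x + 1
\end{code}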
diff --git a/ghc/compiler/typecheck/TcExpr.lhs b/ghc/compiler/typecheck/TcExpr.lhs
index 8015b6ded1..9c59b43d74 100644
--- a/ghc/compiler/typecheck/TcExpr.lhs
+++ b/ghc/compiler/typecheck/TcExpr.lhs
@@ -377,7 +377,8 @@ tcExpr (RecordUpd record_expr rbinds)
-- Check that the field names are plausible
zonkTcType record_ty `thenNF_Tc` \ record_ty' ->
let
- (tycon, inst_tys, data_cons) = trace "TcExpr.getAppDataTyCon" $ getAppDataTyCon record_ty'
+ (tycon, inst_tys, data_cons) = --trace "TcExpr.getAppDataTyCon" $
+ getAppDataTyCon record_ty'
-- The record binds are non-empty (syntax); so at least one field
-- label will have been unified with record_ty by tcRecordBinds;
-- field labels must be of data type; hence the getAppDataTyCon must succeed.
diff --git a/ghc/compiler/typecheck/TcInstDcls.lhs b/ghc/compiler/typecheck/TcInstDcls.lhs
index df32170f2b..5194f9ec7d 100644
--- a/ghc/compiler/typecheck/TcInstDcls.lhs
+++ b/ghc/compiler/typecheck/TcInstDcls.lhs
@@ -61,7 +61,7 @@ import CmdLineOpts ( opt_GlasgowExts, opt_CompilingGhcInternals,
import Class ( GenClass, GenClassOp,
isCcallishClass, classBigSig,
classOps, classOpLocalType,
- classOpTagByString
+ classOpTagByString_maybe
)
import Id ( GenId, idType, isDefaultMethodId_maybe )
import ListSetOps ( minusList )
@@ -602,10 +602,13 @@ processInstBinds1 clas avail_insts method_ids mbind
-- Make a method id for the method
let
- tag = classOpTagByString clas occ
- method_id = method_ids !! (tag-1)
- method_ty = tcIdType method_id
+ maybe_tag = classOpTagByString_maybe clas occ
+ (Just tag) = maybe_tag
+ method_id = method_ids !! (tag-1)
+ method_ty = tcIdType method_id
in
+ -- check that the method mentioned is actually in the class:
+ checkMaybeTc maybe_tag (instMethodNotInClassErr occ clas) `thenTc_`
tcInstTcType method_ty `thenNF_Tc` \ (method_tyvars, method_rho) ->
let
@@ -921,6 +924,10 @@ omitDefaultMethodWarn clas_op clas_name inst_ty sty
ppr sty clas_op, ppStr "in instance",
ppPStr clas_name, pprParendGenType sty inst_ty]
+instMethodNotInClassErr occ clas sty
+ = ppHang (ppStr "Instance mentions a method not in the class")
+ 4 (ppBesides [ppStr "class `", ppr sty clas, ppStr "' method `",
+ ppPStr occ, ppStr "'"])
patMonoBindsCtxt pbind sty
= ppHang (ppStr "In a pattern binding:")
diff --git a/ghc/compiler/typecheck/TcInstUtil.lhs b/ghc/compiler/typecheck/TcInstUtil.lhs
index 38b8f2fb41..9af279fc42 100644
--- a/ghc/compiler/typecheck/TcInstUtil.lhs
+++ b/ghc/compiler/typecheck/TcInstUtil.lhs
@@ -41,7 +41,8 @@ import Type ( mkSigmaTy, mkForAllTys, mkDictTy, mkTyVarTys,
splitForAllTy, instantiateTy, matchTy, SYN_IE(ThetaType) )
import TyVar ( GenTyVar )
import Unique ( Unique )
-import Util ( equivClasses, zipWithEqual, panic )
+import Util ( equivClasses, zipWithEqual, panic{-, pprTrace-} )
+--import PprStyle
import IdInfo ( noIdInfo )
--import TcPragmas ( tcDictFunPragmas, tcGenPragmas )
@@ -119,6 +120,8 @@ mkInstanceRelatedIds from_here src_loc inst_mod inst_pragmas
returnTc (mkDictFunId dfun_uniq clas inst_ty dfun_ty from_here src_loc inst_mod dfun_id_info)
) `thenTc` \ dfun_id ->
+-- pprTrace "DFUN: " (ppr PprDebug dfun_id) $
+
-- MAKE THE CONSTANT-METHOD IDS
-- if there are no type variables involved
(if (null inst_decl_theta)
diff --git a/ghc/compiler/typecheck/TcModule.lhs b/ghc/compiler/typecheck/TcModule.lhs
index 077b0791ef..113c82e0fd 100644
--- a/ghc/compiler/typecheck/TcModule.lhs
+++ b/ghc/compiler/typecheck/TcModule.lhs
@@ -128,7 +128,11 @@ tcModule rn_env
-- pragmas, which is done lazily [ie failure just drops the pragma
-- without having any global-failure effect].
+ -- trace "tc1" $
+
fixTc (\ ~(_, _, _, _, _, _, sig_ids) ->
+
+ -- trace "tc2" $
tcExtendGlobalValEnv sig_ids (
-- The knot for instance information. This isn't used at all
@@ -140,6 +144,7 @@ tcModule rn_env
tcTyAndClassDecls1 rec_inst_mapper ty_decls_bag cls_decls_bag
`thenTc` \ env ->
+ --trace "tc3" $
-- Typecheck the instance decls, includes deriving
tcSetEnv env (
--trace "tcInstDecls:" $
@@ -147,11 +152,14 @@ tcModule rn_env
mod_name rn_env fixities
) `thenTc` \ (inst_info, deriv_binds, ddump_deriv) ->
+ --trace "tc4" $
buildInstanceEnvs inst_info `thenTc` \ inst_mapper ->
returnTc (inst_mapper, env, inst_info, deriv_binds, ddump_deriv)
) `thenTc` \ (_, env, inst_info, deriv_binds, ddump_deriv) ->
+
+ --trace "tc5" $
tcSetEnv env (
-- Default declarations
@@ -181,11 +189,13 @@ tcModule rn_env
-- we silently discard the pragma
tcInterfaceSigs sigs `thenTc` \ sig_ids ->
tcGetEnv `thenNF_Tc` \ env ->
+ --trace "tc6" $
returnTc (env, inst_info, data_binds, deriv_binds, ddump_deriv, defaulting_tys, sig_ids)
)))) `thenTc` \ (env, inst_info, data_binds, deriv_binds, ddump_deriv, defaulting_tys, _) ->
+ --trace "tc7" $
tcSetEnv env ( -- to the end...
tcSetDefaultTys defaulting_tys ( -- ditto
@@ -197,6 +207,7 @@ tcModule rn_env
(val_decls `ThenBinds` deriv_binds)
( -- Second pass over instance declarations,
-- to compile the bindings themselves.
+ --trace "tc8" $
tcInstDecls2 inst_info `thenNF_Tc` \ (lie_instdecls, inst_binds) ->
tcClassDecls2 cls_decls_bag `thenNF_Tc` \ (lie_clasdecls, cls_binds) ->
tcGetEnv `thenNF_Tc` \ env ->
@@ -212,6 +223,7 @@ tcModule rn_env
-- restriction, and no subsequent decl instantiates its
-- type. (Usually, ambiguous type variables are resolved
-- during the generalisation step.)
+ --trace "tc9" $
tcSimplifyTop lie_alldecls `thenTc` \ const_insts ->
-- Backsubstitution. Monomorphic top-level decls may have
diff --git a/ghc/compiler/types/Class.lhs b/ghc/compiler/types/Class.lhs
index adfbe516f2..e7630b0539 100644
--- a/ghc/compiler/types/Class.lhs
+++ b/ghc/compiler/types/Class.lhs
@@ -14,7 +14,7 @@ module Class (
classSuperDictSelId, classOpId, classDefaultMethodId,
classSig, classBigSig, classInstEnv,
isSuperClassOf,
- classOpTagByString,
+ classOpTagByString, classOpTagByString_maybe,
derivableClassKeys, needsDataDeclCtxtClassKeys,
cCallishClassKeys, isNoDictClass,
@@ -331,17 +331,23 @@ classOpLocalType (ClassOp _ _ ty) = ty
Rather unsavoury ways of getting ClassOp tags:
\begin{code}
-classOpTagByString :: Class -> FAST_STRING -> Int
+classOpTagByString_maybe :: Class -> FAST_STRING -> Maybe Int
+classOpTagByString :: Class -> FAST_STRING -> Int
classOpTagByString clas op
+ = case (classOpTagByString_maybe clas op) of
+ Just tag -> tag
+#ifdef DEBUG
+ Nothing -> pprPanic "classOpTagByString:" (ppCat (ppPStr op : map (ppPStr . classOpString) (classOps clas)))
+#endif
+
+classOpTagByString_maybe clas op
= go (map classOpString (classOps clas)) 1
where
+ go [] _ = Nothing
go (n:ns) tag = if n == op
- then tag
+ then Just tag
else go ns (tag+1)
-#ifdef DEBUG
- go [] tag = pprPanic "classOpTagByString:" (ppCat (ppPStr op : map (ppPStr . classOpString) (classOps clas)))
-#endif
\end{code}
%************************************************************************
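The Class.lhs change above is a totalising refactor: the lookup now lives in a Maybe-returning function, and the original partial interface becomes a thin wrapper that panics on Nothing, so callers such as processInstBinds1 can report a proper error instead. A small sketch of the same pattern on plain strings (hypothetical names, not the ClassOp types):
\begin{code}
module TagLookupSketch where

opTagByString_maybe :: [String] -> String -> Maybe Int
opTagByString_maybe ops op = go ops 1
  where
    go []     _   = Nothing
    go (n:ns) tag
      | n == op   = Just tag
      | otherwise = go ns (tag + 1)

opTagByString :: [String] -> String -> Int        -- old, partial interface
opTagByString ops op
  = case opTagByString_maybe ops op of
      Just tag -> tag
      Nothing  -> error ("opTagByString: " ++ op ++ " not among " ++ show ops)
\end{code}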
diff --git a/ghc/compiler/types/PprType.lhs b/ghc/compiler/types/PprType.lhs
index 7a6480f22a..300160053e 100644
--- a/ghc/compiler/types/PprType.lhs
+++ b/ghc/compiler/types/PprType.lhs
@@ -453,6 +453,7 @@ getTyDescription ty
TyConTy tycon _ -> _UNPK_ (getLocalName tycon)
SynTy tycon _ _ -> _UNPK_ (getLocalName tycon)
DictTy _ _ _ -> "dict"
+ ForAllTy _ ty -> getTyDescription ty
_ -> pprPanic "getTyDescription: other" (pprType PprDebug tau_ty)
}
where
diff --git a/ghc/compiler/types/Type.lhs b/ghc/compiler/types/Type.lhs
index 7b77b998a1..d63cecc64d 100644
--- a/ghc/compiler/types/Type.lhs
+++ b/ghc/compiler/types/Type.lhs
@@ -64,7 +64,7 @@ import Maybes ( maybeToBool, assocMaybe )
import PrimRep ( PrimRep(..) )
import Unique -- quite a few *Keys
import Util ( thenCmp, zipEqual, assoc,
- panic, panic#, assertPanic,
+ panic, panic#, assertPanic, pprPanic,
Ord3(..){-instances-}
)
-- ToDo:rm all these
@@ -78,6 +78,7 @@ import Util ( thenCmp, zipEqual, assoc,
-- UniqFM (ufmToList )
--import {-mumble-}
-- Outputable
+--import PprEnv
\end{code}
Data types
@@ -472,7 +473,7 @@ get_app_data_tycon maybe ty
= case maybe ty of
Just stuff -> stuff
#ifdef DEBUG
- Nothing -> panic "Type.getAppDataTyCon" -- (pprGenType PprShowAll ty)
+ Nothing -> panic "Type.getAppDataTyCon"-- (pprGenType PprShowAll ty)
#endif
diff --git a/ghc/compiler/utils/Bag.lhs b/ghc/compiler/utils/Bag.lhs
index 36fe3148ad..15678cfbe8 100644
--- a/ghc/compiler/utils/Bag.lhs
+++ b/ghc/compiler/utils/Bag.lhs
@@ -4,13 +4,18 @@
\section[Bags]{@Bag@: an unordered collection with duplicates}
\begin{code}
+#ifdef COMPILING_GHC
#include "HsVersions.h"
+#endif
module Bag (
Bag, -- abstract type
emptyBag, unitBag, unionBags, unionManyBags,
- mapBag, -- UNUSED: elemBag,
+ mapBag,
+#ifndef COMPILING_GHC
+ elemBag,
+#endif
filterBag, partitionBag, concatBag, foldBag,
isEmptyBag, consBag, snocBag,
listToBag, bagToList
@@ -22,6 +27,8 @@ IMPORT_1_3(List(partition))
import Outputable ( interpp'SP )
import Pretty
+#else
+import List(partition)
#endif
data Bag a
@@ -35,7 +42,7 @@ data Bag a
emptyBag = EmptyBag
unitBag = UnitBag
-{- UNUSED:
+#ifndef COMPILING_GHC
elemBag :: Eq a => a -> Bag a -> Bool
elemBag x EmptyBag = False
@@ -43,7 +50,7 @@ elemBag x (UnitBag y) = x==y
elemBag x (TwoBags b1 b2) = x `elemBag` b1 || x `elemBag` b2
elemBag x (ListBag ys) = any (x ==) ys
elemBag x (ListOfBags bs) = any (x `elemBag`) bs
--}
+#endif
unionManyBags [] = EmptyBag
unionManyBags xs = ListOfBags xs
@@ -55,8 +62,9 @@ unionBags b EmptyBag = b
unionBags b1 b2 = TwoBags b1 b2
consBag :: a -> Bag a -> Bag a
-consBag elt bag = (unitBag elt) `unionBags` bag
snocBag :: Bag a -> a -> Bag a
+
+consBag elt bag = (unitBag elt) `unionBags` bag
snocBag bag elt = bag `unionBags` (unitBag elt)
isEmptyBag EmptyBag = True
diff --git a/ghc/compiler/utils/FiniteMap.lhs b/ghc/compiler/utils/FiniteMap.lhs
index c95f0b4626..d8c59893f5 100644
--- a/ghc/compiler/utils/FiniteMap.lhs
+++ b/ghc/compiler/utils/FiniteMap.lhs
@@ -25,6 +25,10 @@ near the end (only \tr{#ifdef COMPILING_GHC}).
#define ASSERT(e) {--}
#define IF_NOT_GHC(a) a
#define COMMA ,
+#define _tagCmp compare
+#define _LT LT
+#define _GT GT
+#define _EQ EQ
#endif
#if defined(COMPILING_GHC) && defined(DEBUG_FINITEMAPS)/* NB NB NB */
diff --git a/ghc/compiler/utils/ListSetOps.lhs b/ghc/compiler/utils/ListSetOps.lhs
index 5a46b2391b..39172473d9 100644
--- a/ghc/compiler/utils/ListSetOps.lhs
+++ b/ghc/compiler/utils/ListSetOps.lhs
@@ -4,13 +4,15 @@
\section[ListSetOps]{Set-like operations on lists}
\begin{code}
+#ifdef COMPILING_GHC
#include "HsVersions.h"
+#endif
module ListSetOps (
unionLists,
intersectLists,
minusList
-#if ! defined(COMPILING_GHC)
+#ifndef COMPILING_GHC
, disjointLists, intersectingLists
#endif
) where
diff --git a/ghc/compiler/utils/Maybes.lhs b/ghc/compiler/utils/Maybes.lhs
index 5ed4ac316f..1f17679019 100644
--- a/ghc/compiler/utils/Maybes.lhs
+++ b/ghc/compiler/utils/Maybes.lhs
@@ -13,7 +13,6 @@ module Maybes (
MaybeErr(..),
allMaybes,
- catMaybes,
firstJust,
expectJust,
maybeToBool,
@@ -28,7 +27,9 @@ module Maybes (
returnMaybe,
thenMaB
-#if ! defined(COMPILING_GHC)
+#if defined(COMPILING_GHC)
+ , catMaybes
+#else
, findJust
, foldlMaybeErrs
, listMaybeErrs
@@ -41,6 +42,8 @@ CHK_Ubiq() -- debugging consistency check
import Unique (Unique) -- only for specialising
+#else
+import Maybe -- renamer will tell us if there are any conflicts
#endif
\end{code}
@@ -63,10 +66,12 @@ a list of @Justs@ into a single @Just@, returning @Nothing@ if there
are any @Nothings@.
\begin{code}
+#ifdef COMPILING_GHC
catMaybes :: [Maybe a] -> [a]
catMaybes [] = []
catMaybes (Nothing : xs) = catMaybes xs
catMaybes (Just x : xs) = (x : catMaybes xs)
+#endif
allMaybes :: [Maybe a] -> Maybe [a]
allMaybes [] = Just []
diff --git a/ghc/compiler/utils/Pretty.lhs b/ghc/compiler/utils/Pretty.lhs
index 985666d013..ad2a76fb9d 100644
--- a/ghc/compiler/utils/Pretty.lhs
+++ b/ghc/compiler/utils/Pretty.lhs
@@ -12,10 +12,12 @@
#endif
module Pretty (
- SYN_IE(Pretty),
#if defined(COMPILING_GHC)
+ SYN_IE(Pretty),
prettyToUn,
+#else
+ Pretty,
#endif
ppNil, ppStr, ppPStr, ppChar, ppInt, ppInteger,
ppFloat, ppDouble,
@@ -46,6 +48,8 @@ IMPORT_1_3(Ratio)
IMPORT_1_3(IO)
import Unpretty ( SYN_IE(Unpretty) )
+#else
+import Ratio
#endif
import CharSeq
diff --git a/ghc/compiler/utils/Util.lhs b/ghc/compiler/utils/Util.lhs
index adc6e65ba9..6d51f3aaf2 100644
--- a/ghc/compiler/utils/Util.lhs
+++ b/ghc/compiler/utils/Util.lhs
@@ -9,12 +9,16 @@
# define IF_NOT_GHC(a) {--}
#else
# define panic error
-# define TAG_ _CMP_TAG
-# define LT_ _LT
-# define EQ_ _EQ
-# define GT_ _GT
+# define TAG_ Ordering
+# define LT_ LT
+# define EQ_ EQ
+# define GT_ GT
+# define _LT LT
+# define _EQ EQ
+# define _GT GT
# define GT__ _
-# define tagCmp_ _tagCmp
+# define tagCmp_ compare
+# define _tagCmp compare
# define FAST_STRING String
# define ASSERT(x) {-nothing-}
# define IF_NOT_GHC(a) a
@@ -41,8 +45,8 @@ module Util (
zipLazy,
mapAndUnzip, mapAndUnzip3,
nOfThem, lengthExceeds, isSingleton,
- startsWith, endsWith,
#if defined(COMPILING_GHC)
+ startsWith, endsWith,
isIn, isn'tIn,
#endif
@@ -65,9 +69,12 @@ module Util (
mapAccumL, mapAccumR, mapAccumB,
-- comparisons
+#if defined(COMPILING_GHC)
Ord3(..), thenCmp, cmpList,
- IF_NOT_GHC(cmpString COMMA)
cmpPString,
+#else
+ cmpString,
+#endif
-- pairs
IF_NOT_GHC(cfst COMMA applyToPair COMMA applyToFst COMMA)
@@ -88,6 +95,8 @@ CHK_Ubiq() -- debugging consistency check
IMPORT_1_3(List(zipWith4))
import Pretty
+#else
+import List(zipWith4)
#endif
infixr 9 `thenCmp`
@@ -212,7 +221,7 @@ startsWith, endsWith :: String -> String -> Maybe String
startsWith [] str = Just str
startsWith (c:cs) (s:ss)
= if c /= s then Nothing else startsWith cs ss
-startWith _ [] = Nothing
+startsWith _ [] = Nothing
endsWith cs ss
= case (startsWith (reverse cs) (reverse ss)) of
@@ -715,7 +724,11 @@ cmpString (x:xs) (y:ys) = if x == y then cmpString xs ys
cmpString [] ys = LT_
cmpString xs [] = GT_
+#ifdef COMPILING_GHC
cmpString _ _ = panic# "cmpString"
+#else
+cmpString _ _ = error "cmpString"
+#endif
\end{code}
\begin{code}
diff --git a/ghc/docs/ANNOUNCE-0.06 b/ghc/docs/ANNOUNCE-0.06
deleted file mode 100644
index 8a1b6334c3..0000000000
--- a/ghc/docs/ANNOUNCE-0.06
+++ /dev/null
@@ -1,116 +0,0 @@
- The Glasgow Haskell Compiler
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Version 0.06 --- Hackers' release
-
-As many of you know, we have been working hard at Glasgow on a modular
-Haskell compiler. We are proud to announce its first public release.
-
-We are calling it a "Hackers' release" because it is not yet suitable
-for Haskell *programmers*. It is intended for *implementors* who are
-interested in using our compiler as a substrate for their own work.
-(A later version will indeed be a "Programmers' release".) We also
-hope that some *porters*, people who would like to see Haskell running
-on their system, will help us debug any Sun dependencies in our
-generated C files. Finally, the *curious* may simply want to see the
-World's Largest Haskell Program (40,000 lines?)!
-
-The compiler has the following characteristics:
-
- * It is written in Haskell.
-
- * It generates C as its target code.
-
- * It is specifically designed to be modular, so that others can
- use it as a "motherboard" into which they can "plug in" their
- latest whizzy strictness analyser, profiler, or whatever.
-
- * Internally, it uses the polymorphic second-order lambda calculus
- as a way to preserve correct type information in the face of
- substantial program transformations.
-
- * It implements unboxed values as described in [1]. In
- particular, the implementation of arithmetic and the exploitation
- of strictness analysis is handled just as described there.
-
- * Its back end is based on the Spineless Tagless G-machine, an
- abstract machine for non-strict functional languages. There is a
- detailed paper describing this work [2].
-
- * It plants code to gather quite a lot of simple profiling
- information.
-
- * Its runtime system is heavily configurable. For example, it
- comes complete with three different garbage collectors: two-space,
- one-space compacting, and Appel-style generational. Adding extra
- fields to heap objects (for debugging or profiling for example) is
- just a question of altering C macros; the Haskell source doesn't
- need to be recompiled. (Caveat: you have to alter them *right*!)
-
-The compiler also suffers its fair share of deficiencies:
-
- * The compiler itself is large and slow.
-
- * The code it generates is very, very unoptimised. Any
- comparisons you make of runtime speed with good existing compilers
- will be deeply unflattering. (Our next priority is optimisation.)
-
- * Several language features aren't dealt with. This has not
- prevented us from compiling and running several quite large
- Haskell programs.
-
-Please follow the pointers in the top-level README file to find all of
-the documentation in and about this release. Distribution info
-follows below.
-
-We hope you enjoy this system, and we look forward to hearing about
-your successes with it! Please report bugs to
-glasgow-haskell-bugs@dcs.glasgow.ac.uk and direct general queries to
-glasgow-haskell-request@<same>.
-
-Simon Peyton Jones
-(and his GRASPing colleagues)
-......................................................................
-
-References
-~~~~~~~~~~
-[1] Simon L Peyton Jones and John Launchbury, "Unboxed values as first
-class citizens", Functional Programming Languages and Computer
-Architecture, Boston, ed Hughes, LNCS 523, Springer Verlag, Sept 1991.
-
-[2] Simon L Peyton Jones, "Implementing lazy functional languages on
-stock hardware: the Spineless Tagless G-machine", Journal of
-Functional Programming (to appear). Also obtainable by anonymous FTP
-from ftp.dcs.glasgow.ac.uk:pub/glasgow-fp/stg.dvi.
-
-Distribution
-~~~~~~~~~~~~
-This release is available, in whole or in part, from the usual Haskell
-anonymous FTP sites, in the directory pub/haskell/glasgow:
-
- nebula.cs.yale.edu (128.36.13.1)
- ftp.dcs.glasgow.ac.uk (130.209.240.50)
- animal.cs.chalmers.se (129.16.225.66)
-
-(Beleaguered NIFTP users within the UK can get the same files by using
-a <FP>/haskell/glasgow prefix, instead of pub/haskell/glasgow.)
-
-These are the available files (for the ON DEMAND ones, please ask):
-
-ghc-0.06-src.tar.Z the basic source distribution; assumes you
- will compile it with Chalmers HBC, version
- 0.997.3 or later.
-
-ghc-0.06-proto-hi-files.tar.Z
- An "overlay" of .hi interface files, to be
- used when compiling with the *prototype*
- Glasgow Haskell compiler (version 0.411 or
- later).
-
-ghc-0.06-hc-files.tar.Z An "overlay" of .hc generated-C files; used
- either to save you the trouble of compiling
- the prelude, or because your only interest is
- porting the C to
-
-ghc-0.06-tests.tar.Z Some of our test files we have used in getting
- this beast going. We hope to grow them into a
- semi-respectable benchmark suite.
diff --git a/ghc/docs/ANNOUNCE-0.10 b/ghc/docs/ANNOUNCE-0.10
deleted file mode 100644
index 04062f1c73..0000000000
--- a/ghc/docs/ANNOUNCE-0.10
+++ /dev/null
@@ -1,135 +0,0 @@
- The Glasgow Haskell Compiler
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-We are happy to announce the first full release of the Glasgow Haskell
-Compiler (GHC, version 0.10). It is freely available by FTP; details
-appear below.
-
-To run this release, you need a Sun4, probably with 16+MB memory, and
-GNU C (gcc), version 2.1 or greater, and "perl". If building from
-source, you will need Chalmers HBC, version 0.998.x.
-
-We hope you enjoy this system, and we look forward to hearing about
-your successes with it! Please report bugs to
-glasgow-haskell-bugs@dcs.glasgow.ac.uk and direct general queries to
-glasgow-haskell-request@<same>.
-
-Simon Peyton Jones
-(and his GRASPing colleagues)
-
-Why a Haskell programmer might want to use GHC
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-* Almost all of Haskell is implemented. In particular, the full range
- of data types is supported: arbitrary precision integers, rationals,
- double-precision floats, and "real" arrays with O(1) access time.
- (The release notes list all unimplemented language features.)
-
-* An extensible I/O system is provided, based on a "monad" [1]. (The
- standard Haskell I/O system is built on this foundation.)
-
-* A number of significant language extensions are implemented:
- - Fully fledged unboxed data types [2].
- - Ability to write arbitrary in-line C-language code, using
- the I/O monad to retain referential transparency.
- - Incrementally-updatable arrays, also embedded in a monad.
- - Mutable reference types.
-
-* By default, the system uses a generational garbage collector which
- lets you run programs whose live data is significantly larger than
- the physical memory size before thrashing occurs. (Conventional
- 2-space GC starts thrashing when the live data gets to about half
- the physical memory size.)
-
-* A new profiling system is supplied, which enables you to find out which
- bits of your program are eating both *time* and the *space* [3].
-
-* Good error messages. Well, fairly good error messages. Line
- numbers are pretty accurate, and during type checking you get
- several (accurate) error reports rather than just one.
-
-* Performance: programs compiled with GHC "usually" beat
- Chalmers-HBC-compiled ones. If you find programs where HBC wins,
- send them to us :-).
-
-* We have a pretty good test suite, and this version passes
- practically all of it. (Yes, it can compile itself, too.) We hope
- you will find the system to be robust.
-
-Why a functional-language implementor might want to use GHC
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-* We have tried very hard to write the compiler in a modular and
- well-documented way, so that other researchers can modify and extend
- it. One of our goals is specifically to provide a framework to
- support others' work. Several people are already using it in this
- way.
-
-* Highly configurable runtime system. Heavy use of C macros means
- that you can modify much of the storage representation without
- telling the compiler. For example, the system comes with 4
- different garbage collectors! (all working)
-
-* Internals: extensive use of the second-order lambda calculus as an
- intermediate code; the Spineless Tagless G-machine as evaluation
- model [4].
-
-* Various performance-measurement hooks.
-
-Main shortcomings
-~~~~~~~~~~~~~~~~~
-* No interactive system. This is a batch compiler only. (Any
- volunteers?)
-
-* Compiler is greedy on resources. Going via C doesn't help here.
-
-* This system should run on any Unix box. We haven't had time to do
- any non-Sun4 ports. Help or prodding welcome.
-
-References
-~~~~~~~~~~
-All these papers come with the distribution [in ghc/docs/papers].
-
-[1] "Imperative functional programming", Peyton Jones & Wadler, POPL '93
-
-[2] "Unboxed data types as first-class citizens", Peyton Jones &
- Launchbury, FPCA '91
-
-[3] "Profiling lazy functional languages", Sansom & Peyton Jones,
- Glasgow workshop '92
-
-[4] "Implementing lazy functional languages on stock hardware", Peyton
- Jones, Journal of Functional Programming, Apr 1992
-
-How to get it
-~~~~~~~~~~~~~
-This release is available, in whole or in part, from the usual Haskell
-anonymous FTP sites, in the directory pub/haskell/glasgow:
-
- nebula.cs.yale.edu (128.36.13.1)
- ftp.dcs.glasgow.ac.uk (130.209.240.50)
- animal.cs.chalmers.se (129.16.225.66)
-
-(Beleaguered NIFTP users within the UK can get the same files from
-Glasgow by using a <FP>/haskell/glasgow prefix, instead of
-pub/haskell/glasgow. Also, we are mirrored by src.doc.ic.ac.uk, in
-languages/haskell/glasgow, and you can get files from there by every
-means known to humanity.)
-
-These are the available files:
-
-ghc-0.10-src.tar.Z The basic source distribution; assumes you
- will compile it with Chalmers HBC, version
- 0.998.n, on a Sun4, for which you have GNU C
- (gcc) version 2.1 or greater. About 3MB.
-
-ghc-0.10-bin-sun4.tar.Z A binary distribution -- avoid compiling
- altogether! For SunOS 4.1.x; assumes you
- have GNU C (gcc) version 2.x around...
-
-ghc-0.10-patch-* Patches to the original distribution. There
- are none to start with, of course, but there
- might be by the time you grab the files.
- Please check for them.
-
-Once you have the distribution, please follow the pointers in the
-ghc/README file to find all of the documentation in and about this
-release.
diff --git a/ghc/docs/ANNOUNCE-0.16 b/ghc/docs/ANNOUNCE-0.16
deleted file mode 100644
index f6425661e4..0000000000
--- a/ghc/docs/ANNOUNCE-0.16
+++ /dev/null
@@ -1,146 +0,0 @@
- The Glasgow Haskell Compiler -- version 0.16
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The second public release of the Glasgow Haskell Compiler is now
-available (GHC, version 0.16). Binaries (recommended) and source are
-freely available by FTP; details appear below.
-
-GHC 0.16 is still alpha-quality software. This release in an interim
-measure, not as solid as I would prefer. However, a lot has gone in
-since December. The profiling system is Way Cool. The compiler now
-has a strictness analyser and an update analyser. Compiled programs
-tend to run faster. Compilation speed is worse. Bugs remain, but
-they tend to be work-around-able.
-
-To run this release, you need a Sun4 or Sun3, probably with 16+MB
-memory, and GNU C (gcc), version 2.1 or greater, and "perl".
-
-This system can be built from source using: itself (most likely to
-succeed), the previous GHC release (0.10) [least likely], or the
-Chalmers HBC compiler [in-between]. Please see the appropriate
-documentation for details.
-
-Please report bugs to glasgow-haskell-bugs@dcs.glasgow.ac.uk and
-direct general queries to glasgow-haskell-request@<same>.
-
-Will Partain
-(typist for the AQUA [formerly GRASP] project)
-
-....................................................................
-
-Why a Haskell programmer might want to use GHC
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-* GHC provides an extensible I/O system, based on a "monad" [1]. (The
- standard Haskell I/O system is built on this foundation.)
-
-* A number of significant language extensions are implemented:
- - Fully fledged unboxed data types [2].
- - Ability to write arbitrary in-line C-language code, using
- the I/O monad to retain referential transparency.
- - Incrementally-updatable arrays, also embedded in a monad.
- - Mutable reference types.
-
-* A new profiling system is supplied, which enables you to find out
- which bits of your program are eating both *time* and the *space* [3].
-
-* By default, the system uses a generational garbage collector which
- lets you run programs whose live data is significantly larger than
- the physical memory size before thrashing occurs. (Conventional
- 2-space GC starts thrashing when the live data gets to about half
- the physical memory size.)
-
-* Good error messages. Well, fairly good error messages. Line
- numbers are pretty accurate, and during type checking you get
- several (accurate) error reports rather than just one.
-
-* Performance: programs compiled with GHC "often" beat
- Chalmers-HBC-compiled ones. If you find programs where HBC wins,
- please report it to us, as a bug :-).
-
-Why a functional-language implementor might want to use GHC
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-* We have tried very hard to write the compiler in a modular and
- well-documented way, so that other researchers can modify and extend
- it. One of our goals is specifically to provide a framework to
- support others' work. Several people are already using it in this
- way.
-
-* Highly configurable runtime system. Heavy use of C macros means
- that you can modify much of the storage representation without
- telling the compiler. For example, the system comes with 4
- different garbage collectors! (all working)
-
-* Internals: extensive use of the second-order lambda calculus as an
- intermediate code; the Spineless Tagless G-machine as evaluation
- model [4].
-
-* Various performance-measurement hooks.
-
-Main shortcomings
-~~~~~~~~~~~~~~~~~
-* No interactive system. This is a batch compiler only. (Any
- volunteers?)
-
-* Compiler is greedy on resources. Going via C doesn't help here.
-
-* This system should run on any Unix box. We haven't had time to do
- any non-Sun ports. Help or prodding welcome.
-
-References
-~~~~~~~~~~
-All these papers come with the distribution [in ghc/docs/papers].
-
-[1] "Imperative functional programming", Peyton Jones & Wadler, POPL '93
-
-[2] "Unboxed data types as first-class citizens", Peyton Jones &
- Launchbury, FPCA '91
-
-[3] "Profiling lazy functional languages", Sansom & Peyton Jones,
- Glasgow workshop '92
-
-[4] "Implementing lazy functional languages on stock hardware", Peyton
- Jones, Journal of Functional Programming, Apr 1992
-
-How to get it
-~~~~~~~~~~~~~
-This release is available, in whole or in part, from the usual Haskell
-anonymous FTP sites, in the directory pub/haskell/glasgow:
-
- ftp.dcs.glasgow.ac.uk (130.209.240.50)
- ftp.cs.chalmers.se (129.16.225.66)
- nebula.cs.yale.edu (128.36.13.1)
-
-We are mirrored by src.doc.ic.ac.uk, in
-computing/programming/languages/haskell/glasgow, and you can get files
-from there by every means known to humanity.
-
-These are the available files (.Z for compressed, .gz for gzipped) --
-some are `on demand', ask if you don't see them:
-
-ghc-0.16-bin-sun4.tar.{Z,gz} A binary distribution -- avoid compiling
- altogether! For SunOS 4.1.x; assumes you have
- GNU C (gcc) version 2.x around...
-
-ghc-0.16-src.tar.gz The basic source distribution; about 3MB.
-
-ghc-0.16-hi-files-{hbc,ghc-0.10}.tar.gz
- Interface files for the compiler proper
- (ghc/compiler/*/*.hi), to be used if booting
- with either HBC or GHC version 0.10. (The
- distributed .hi files assume GHC version
- 0.16.)
-
-ghc-0.16-hc-files.tar.gz The intermediate C files for the compiler
- proper, the prelude, and `Hello, world'.
- Used when porting.
-
-ghc-0.16-patch-* Patches to the original distribution. There
- are none to start with, of course, but there
- might be by the time you grab the files.
- Please check for them.
-
-There are no diffs from version 0.10, as they would be laughably huge.
-
-Once you have the distribution, please follow the pointers in the
-ghc/README file to find all of the documentation in and about this
-release.
diff --git a/ghc/docs/ANNOUNCE-0.19 b/ghc/docs/ANNOUNCE-0.19
deleted file mode 100644
index 6f0523f384..0000000000
--- a/ghc/docs/ANNOUNCE-0.19
+++ /dev/null
@@ -1,130 +0,0 @@
- The Glasgow Haskell Compiler -- version 0.19
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- "What a great system!"
-
-The third public release of the Glasgow Haskell Compiler is now
-available (GHC, version 0.19). Binaries and sources are freely
-available by FTP; details below.
-
-Highlights of what's new in 0.19 since 0.16 (July 1993):
- * Somewhat faster compilation times.
- * Still better error messages.
- * Better Haskell 1.2 compliance, including more derived instances,
- `default' declarations, renaming, etc.
- * Native-code generator for SPARC.
- * Unfoldings across module boundaries.
- * Automatic specialisation of overloaded functions.
- * Better strictness analysis, including "looking inside tuples" and
- "absence analysis" (arguments that aren't used).
- * New "simplifier" (program-transformation engine).
-
-Please see the release notes for a more complete list (including
-Backward Incompatibilities to watch out for).
-
-To run this release, you need a machine with 16+MB memory, GNU C
-(`gcc') [version 2.1 or greater], and `perl'. We have seen GHC work
-in *some* form or fashion on: Sun4s, Sun3s, DECstations, DEC Alphas,
-HP-PA boxes. Sun4s, our development platform, are by far the best
-supported. We will distribute binaries as we build them.
-
-Once you have the distribution, please follow the pointers in
-ghc/README to find all of the documentation in and about this release.
-
-Please report bugs to glasgow-haskell-bugs@dcs.glasgow.ac.uk and
-direct general queries to glasgow-haskell-request@<same>.
-
-We are very grateful to everyone who has sent a bug report, sent a
-"look at this weird result" report, lent us a machine on which to try
-a port, or (best of all) contributed code. Keep up the good work.
-
-Simon Peyton Jones
-
-Dated: 93/12/16
-....................................................................
-
-"Should I start using GHC 0.19?"
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* If you're using a previous release of GHC: YES. (Recompile everything.)
-
-* If you want to hack on a Haskell compiler: YES.
-
-* If you're new to Haskell: Try Gofer (an interpreter for a
- Haskell-like language) first; then come back and say YES.
-
-* If you want time profiling as well as space profiling: YES.
-
-* If you need the Glasgow Haskell extensions, i.e., calling C, unboxed
- datatypes, monadic I/O etc.: YES. (ghc/README says a little more
- about these features.)
-
-* If you're using HBC at the moment: not a clear YES or NO. *We*
- really like having both compilers to play against each other. For
- example, HBC has better compilation times, but you'll like GHC's
- error messages. And you can try them both before submitting a bug
- report for either one.
-
-* If you want simulated parallel execution on a uniprocessor: NO.
- (Use the "hbcpp" variant of HBC from York.)
-
-....................................................................
-
-How to make sure every release of GHC will run your program (well)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*Please* send us a copy! Part of our work is to collect and study
-large and *realistic* Haskell programs. Only you can provide them.
-They need not be final, polished versions -- they just have to run.
-
-Among other things, we run every release against our entire
-collection, so if your program's in there...
-
-....................................................................
-
-How to get it
-~~~~~~~~~~~~~
-This release is available, in whole or in part, from the usual Haskell
-anonymous FTP sites, in the directory pub/haskell/glasgow:
-
- ftp.dcs.glasgow.ac.uk (130.209.240.50)
- ftp.cs.chalmers.se (129.16.225.66)
- nebula.cs.yale.edu (128.36.13.1)
-
-We are mirrored by src.doc.ic.ac.uk, in
-computing/programming/languages/haskell/glasgow, and you can get files
-from there by every means known to humanity.
-
-These are the available files (.Z for compressed, .gz for gzipped) --
-some are `on demand', ask if you don't see them:
-
-ghc-0.19-bin-sun4.tar.{Z,gz} A binary distribution -- unpack & run!
- For SunOS 4.1.x; assumes you have GNU C (gcc)
- version 2.x around...
-
-ghc-0.19-bin-<other>.tar.gz Other binary distributions -- we will
- make them available as we go along; they
- will be announced on the Haskell mailing list
- (not elsewhere).
-
-ghc-0.19-src.tar.gz The basic source distribution; about 3MB.
-
-ghc-0.19-hc-files.tar.gz The intermediate C (.hc) files for the
- compiler proper, the prelude, and `Hello,
- world'.
-
-ghc-0.19.ANNOUNCE This file
-
-ghc-0.19.{README,RELEASE-NOTES} From the distribution; for those who
- want to peek before FTPing...
-
-ghc-0.19-ps-docs.tar.gz Main GHC documents in PostScript format; in
- case your TeX setup doesn't agree with our
- DVI files...
-
-ghc-0.19-hi-files-hbc.tar.gz
- Interface files for the compiler proper
- (ghc/compiler/*/*.hi), to be used if booting
- with either HBC. (The distributed .hi files
- assume GHC version 0.19.)
-
-There are no diffs from version 0.16, as they would be laughably huge.
diff --git a/ghc/docs/ANNOUNCE-0.20 b/ghc/docs/ANNOUNCE-0.20
deleted file mode 100644
index 2e7f274cb2..0000000000
--- a/ghc/docs/ANNOUNCE-0.20
+++ /dev/null
@@ -1,55 +0,0 @@
-This is version 0.20 of the Glorious Glasgow Haskell compilation
-system (GHC).
-
-Version 0.20 is an "internal" release, intended *ONLY* for the most
-fanatical GHC hackers.
-
-* Many things about it may be broken, though it
-does compile and run most programs.
-
-* I/O and ccall scheme re-done; any such low-level code probably needs
- fixing; I/O attempts to follow 1.3 I/O proposal. All ccall
- arguments and results are automagically "boxed".
-
-* PrimOps fiddled; any code that uses them directly will probably need
- attention.
-
-* We've renamed some things, so as to move to a we-don't-steal-user-
- name-space policy. Thus "tagCmp" has become "_tagCmp". Names starting
- with underscores are now cool if -fglasgow-exts.
-
- You might want to see our "state-interface" document if you mess
- with all this low-level/non-standard stuff; I'll try to remember to
- put a copy up for FTP.
-
-* No promises about profiling.
-
-* Documentation is untouched since 0.19.
-
-Version 0.19 was the last public release. It has held up pretty well
-and should be available wherever you got 0.20 from. I commend 0.19 to
-all sane people.
-
-Configuring 0.20 is a little different than 0.19:
-
- % cd <very-top>
- % ./configure --with-boot=c
- % ./STARTUP-ghc std
- % cd ghc; make
-
-Things to note:
-
-* It's wrong for jmake to tell you "0: unknown flag -traditional"; but
- it is harmless.
-
-* The 0.20 compiler seems more likely to run out of stack; use
- -Rmax-stksize2m (or something) to increase; within the distribution,
- probably something like...
-
- % make EXTRA_HC_OPTS="-H20m -Rmax-stksize4m"
-
-See the "configure" script if you want to know what other options are
--- there is no other documentation at this time!
-
-Will Partain, AQUA project typist
-partain@dcs.glasgow.ac.uk
diff --git a/ghc/docs/ANNOUNCE-0.22 b/ghc/docs/ANNOUNCE-0.22
deleted file mode 100644
index d7fed2c9fa..0000000000
--- a/ghc/docs/ANNOUNCE-0.22
+++ /dev/null
@@ -1,109 +0,0 @@
- The Glasgow Haskell Compiler -- version 0.22
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A new public release of the Glasgow Haskell Compiler is now
-available (GHC, version 0.22). Binaries and sources are freely
-available by FTP; details below.
-
-Highlights of what's new in 0.22 since 0.19 (December 1993):
-
- * Faster compilation times (now about 40% slower than HBC if not
- using -O [on a Sun4]).
- * Revamped state-tranformer stuff, which affects arrays, calling out
- to C, and I/O (preparing for Haskell 1.3).
- * "Threads" stuff -- can do quasi-parallel execution on a
- uniprocessor.
- * No more space leaks from lazy pattern-matching.
- * Alastair Reid's "stable pointers" and "malloc pointers"; friendly
- interaction with "C Land".
- * Time profiling no longer attributes great chunks
- of time to "CAF". (However, because of the many recent changes,
- profiling is probably *less* reliable than before.)
- * New "GHC system library" (analogous to the "HBC system library");
- not much there, but stay tuned.
- * Fully supported on DEC Alphas. Some other porting progress.
- * Much improved configuration.
- * Less user namespace pollution by the system.
- * New mailing lists about Glasgow Haskell.
-
- - The "glasgow-haskell-users" list is for GHC users to chat among
- themselves. Subscribe by sending mail to
- "glasgow-haskell-users-request@dcs.glasgow.ac.uk". Messages for the
- list go to "glasgow-haskell-users".
-
- - The "glasgow-haskell-bugs" list is for submission of bug reports
- and discussion thereof. Subscribe via
- "glasgow-haskell-bugs-request@dcs.glasgow.ac.uk"; send bug
- reports and rumination thereupon go to "glasgow-haskell-bugs".
-
-Please see the release notes for a complete discussion of What's New.
-
-To run this release, you need a machine with 16+MB memory, GNU C
-(`gcc') [version 2.1 or greater], and `perl'. We have seen GHC work
-in *some* form or fashion on: Sun4s, DEC Alphas, Sun3s, NeXTs,
-DECstations, HP-PA and SGI boxes. Sun4s and Alphas, our development
-platforms, are fully supported. We distribute binaries for them.
-
-*** LATE NEWS: Don't use GCC 2.6.0 on the Alpha ***
-
-Once you have the distribution, please follow the pointers in
-ghc/README to find all of the documentation in and about this release.
-
-Please report bugs to glasgow-haskell-bugs@dcs.glasgow.ac.uk and
-direct general queries to glasgow-haskell-request@<same>.
-
-Simon Peyton Jones
-
-Dated: 94/07/27
-....................................................................
-
-How to get it
-~~~~~~~~~~~~~
-This release is available, in whole or in part, from the usual Haskell
-anonymous FTP sites, in the directory pub/haskell/glasgow:
-
- ftp.dcs.glasgow.ac.uk (130.209.240.50)
- ftp.cs.chalmers.se (129.16.225.66)
- nebula.cs.yale.edu (128.36.13.1)
-
-The Glasgow site is mirrored by src.doc.ic.ac.uk, in
-computing/programming/languages/haskell/glasgow.
-
-These are the available files (.gz files are gzipped) -- some are `on
-demand', ask if you don't see them:
-
-ghc-0.22-bin-sun4.tar.gz A binary distribution -- unpack & run!
- For SunOS 4.1.x; assumes you have GNU C (gcc)
- version 2.x around...
-
-ghc-0.22-bin-alpha.tar.gz A binary distribution -- unpack & run!
- Built on OSF1 V2.0; assumes you have GNU C (gcc).
-
-ghc-0.22-bin-<other>.tar.gz Other binary distributions -- we will
- make them available as we go along; they
- will be announced on the Haskell mailing list
- (not elsewhere).
-
-ghc-0.22-src.tar.gz The basic source distribution; about 3MB.
-
-ghc-0.22-hc-files.tar.gz The intermediate C (.hc) files for the
- compiler proper, the prelude, and `Hello,
- world'. About 4MB.
-
-ghc-0.22.ANNOUNCE This file
-
-ghc-0.22.{README,RELEASE-NOTES} From the distribution; for those who
- want to peek before FTPing...
-
-ghc-0.22-ps-docs.tar.gz Main GHC documents in PostScript format; in
- case your TeX setup doesn't agree with our
- DVI files...
-
-ghc-0.22-hi-files-hbc.tar.gz
- Interface files for the compiler proper
- (ghc/compiler/*/*.hi), to be used if booting
- with HBC. Not recommended, but some might
- want to. (The distributed .hi files assume
- GHC version 0.22.)
-
-There are no diffs from version 0.19, as they would be monstrous.
diff --git a/ghc/docs/ANNOUNCE-0.23 b/ghc/docs/ANNOUNCE-0.23
deleted file mode 100644
index d7e7d942a3..0000000000
--- a/ghc/docs/ANNOUNCE-0.23
+++ /dev/null
@@ -1,124 +0,0 @@
- The Glasgow Haskell Compiler -- version 0.23
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A new public release of the Glasgow Haskell Compiler is now available
-(GHC, version 0.23). Binaries and sources are freely available by
-anonymous FTP; details below.
-
-Haskell is "the" standard lazy functional programming language [see
-SIGPLAN Notices, May 1992]. The current language version is 1.2.
-
-GHC is a state-of-the-art batch compiler. For some idea of how it
-compares against the competition, see Pieter Hartel's recent revision
-of his FPCA '93 paper. Reference attached. Summary: we win!
-
-Highlights of what's new in GHC 0.23 since 0.22 (July 1994):
-
- * Faster compilation times (less than 10% slower than HBC if not
- using -O [on a Sun4]).
-
- * Produces 10-15% smaller executables. The main compiler binary is
- 1MB smaller than in 0.22.
-
- * >>> USER-VISIBLE changes <<< to "monadic I/O", because we are
- switching to the Haskell 1.3 *draft* I/O proposal. Please see the
- relevant bit of the User's Guide before doing monadic I/O things
- with 0.23.
-
- * Native-code generator for DEC Alphas.
-
- * A _selective_ lambda lifter.
-
- * The yacc-based parser is now called directly from Haskell.
-
- * Configuration changed enough that "the same old thing" *won't* work.
- Configuring binary distributions should be trivial now.
-
- * Quite a few bugs fixed; the usual big wad of code added.
-
-Please see the release notes for a complete discussion of What's New.
-
-Should you upgrade to 0.23? If you are a contented 0.22 user,
-probably not. Otherwise, probably yes.
-
-To run this release, you need a machine with 16+MB memory, GNU C
-(`gcc'), and `perl'. We have seen GHC work in *some* form or fashion
-on: Sun4s, DEC Alphas, Sun3s, NeXTs, DECstations, HP-PA and SGI boxes.
-Sun4s and Alphas, our development platforms, are fully supported; we
-distribute binaries for them. The release notes give a full
-what-ports-work report.
-
-Once you have the distribution, please follow the pointers in
-ghc/README to find all of the documentation in and about this release.
-NB: preserve modification times when un-tarring (no `m' option for
-tar, please)!
-
-We run mailing lists for GHC users and bug reports; to subscribe, send
-mail to glasgow-haskell-{users,bugs}-request@dcs.glasgow.ac.uk.
-Please send bug reports to glasgow-haskell-bugs.
-
-Simon Peyton Jones
-
-Dated: 94/12/21
-
-======================================================================
-Hartel reference:
-
-@techreport{Har94g,
- author = {P. H. Hartel},
- title = {Benchmarking implementations of lazy functional
- languages {II} -- Two years later},
- institution = {Dept. of Comp. Sys, Univ. of Amsterdam},
- type = {Technical report},
- number = {Cs-94-21},
- month = {Dec},
- year = {1994}}
-
-The paper is available from ftp.fwi.uva.nl,
-file: pub/computer-systems/functional/reports/benchmarkII.ps.Z
-
-The programs are in file: pub/computer-systems/functional/packages/benchmark.tar.Z
-
-======================================================================
-How to get GHC:
-
-This release is available, in whole or in part, from the usual Haskell
-anonymous FTP sites, in the directory pub/haskell/glasgow:
-
- ftp.dcs.glasgow.ac.uk (130.209.240.50)
- ftp.cs.chalmers.se (129.16.227.140)
- haskell.cs.yale.edu (128.36.11.43)
-
-The Glasgow site is mirrored by src.doc.ic.ac.uk (155.198.191.4), in
-computing/programming/languages/haskell/glasgow.
-
-These are the available files (.gz files are gzipped) -- some are `on
-demand', ask if you don't see them:
-
-ghc-0.23-bin-sun4.tar.gz A binary distribution -- unpack & run!
- For SunOS 4.1.x; assumes you have GNU C (gcc)
-
-ghc-0.23-bin-alpha.tar.gz A binary distribution -- unpack & run!
- Built on OSF1 V2.0; assumes you have GNU C (gcc).
-
-ghc-0.23-bin-<other>.tar.gz Other binary distributions -- we will
- make them available as we go along; they
- will be announced on the Haskell mailing list
- (not elsewhere).
-
-ghc-0.23-src.tar.gz The basic source distribution; about 3MB.
-
-ghc-0.23-hc-files.tar.gz The intermediate C (.hc) files for the
- compiler proper, the prelude, and `Hello,
- world'. About 4MB.
-
-ghc-0.23.ANNOUNCE This file
-
-ghc-0.23.{README,RELEASE-NOTES} From the distribution; for those who
- want to peek before FTPing...
-
-ghc-0.23-ps-docs.tar.gz Main GHC documents in PostScript format; in
- case your TeX setup doesn't agree with our
- DVI files...
-
-There are no diffs from version 0.22, as they would be monstrous.
diff --git a/ghc/docs/ANNOUNCE-0.25 b/ghc/docs/ANNOUNCE-0.25
deleted file mode 100644
index a3da0c2e6a..0000000000
--- a/ghc/docs/ANNOUNCE-0.25
+++ /dev/null
@@ -1,54 +0,0 @@
-A binary-only from-working-sources no-guarantees snapshot of the
-Glasgow Haskell compiler (GHC) for Linux x86 machines is now available
-by FTP from ftp.dcs.glasgow.ac.uk, in
-pub/haskell/glasgow/ghc-0.25-linux.tar.gz.
-
-This release is the first, long-awaited "registerized" GHC for Linux,
-which produces code of reasonable size and speed. We use our normal
-technique of "stealing registers" with GCC's
-global-variables-in-registers facility. We "steal" six of the x86's
-eight general-purpose registers, including the C stack-pointer (%esp),
-which we use for the heap pointer (Hp).
-
-To use this GHC, you need a special version of GCC, which is also
-provided in the distribution (under "gcc-linux-to-linux"). Whatever
-you do, please do *not* report any "bugs" in this GCC to bug-gcc --
-report them to *me* instead.
-
-One special thing you must watch out for: If GCC "crashes" with a
-message about spilling registers, it is *not* a GCC problem. It means
-you must get GHC to "back off" in its register "stealing". First try
-a -monly-4-regs flag, then -monly-3-regs, and as a last resort,
--monly-2-regs. As far as we know, all Haskell code goes through GHC
-with a -monly-2-regs flag (but it produces substantially worse code
-with that flag).
-
-Profiling is not provided in this release.
-
-Please report any bugs to glasgow-haskell-bugs@dcs.glasgow.ac.uk.
-
-Will Partain
-AQUA project (slave)
-
-Dated: 95/04/01
-
-=== INSTALLATION NOTES ==============================================
-
-Unpack the distribution.
-
-Move "gcc-linux-to-linux" and "ghc-0.25-linux" wherever you like.
-
-Alter the "gcc" script to point to wherever you've put
-"gcc-linux-to-linux", and put the "gcc" script wherever you wish in
-your PATH.
-
-Make a link to ghc-0.25-linux/ghc/driver/ghc, so that "ghc" will be in
-your PATH.
-
-Change *all* hardwired paths in ghc/driver/ghc and in
-ghc/utils/hscpp/hscpp to point to where things are on your system.
-Notably: where "perl" is (first line of each script), where $TopPwd is
-(ghc script), where your gcc cpp is (hscpp script).
-
-GHC should then work. Try "ghc -v" on something simple, to make sure
-it compiles and links a program correctly.
diff --git a/ghc/docs/ANNOUNCE-0.26 b/ghc/docs/ANNOUNCE-0.26
deleted file mode 100644
index fa352535e8..0000000000
--- a/ghc/docs/ANNOUNCE-0.26
+++ /dev/null
@@ -1,153 +0,0 @@
- The Glasgow Haskell Compiler -- version 0.26
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-We are proud to announce a new public release of the Glasgow Haskell
-Compiler (GHC, version 0.26). Sources and binaries are freely
-available by anonymous FTP and on the World-Wide Web; details below.
-
-Haskell is "the" standard lazy functional programming language [see
-SIGPLAN Notices, May 1992]. The current language version is 1.2. GHC
-provides some proposed features of 1.3, notably monadic I/O.
-
-The Glasgow Haskell project seeks to bring the power and elegance of
-functional programming to bear on real-world problems. To that end,
-GHC lets you call C (including cross-system garbage collection),
-provides good profiling tools, supports ever richer I/O, and (with
-this release) adds concurrency. Our goal is to make it the "tool of
-choice for real-world applications".
-
-Highlights of what's new in GHC 0.26 since 0.24 (March 1995):
-
- * Concurrent Haskell: with this, you can build programs out of many
- I/O-performing, interacting `threads'. We have a draft paper
- about Concurrent Haskell, and our forthcoming Haggis GUI toolkit
- uses it.
-
- * Parallel Haskell, running on top of PVM (Parallel Virtual Machine)
- and hence portable to pretty much any parallel architecture,
- whether shared memory or distributed memory. With this, your
- Haskell program runs on multiple processors, guided by `par` and
- `seq` annotations. The first pretty-much-everyone-can-try-it
- parallel functional programming system! NB: The parallel stuff is
- "research-tool quality"... consider this an alpha release.
-
- * "Foldr/build" deforestation (by Andy Gill) is in, as are
- "SPECIALIZE instance" pragmas (by Patrick Sansom).
-
- * The LibPosix library provides an even richer I/O interface than
- the standard 1.3 I/O library. A program like a shell or an FTP
- client can be written in Haskell -- examples included.
-
- * Yet more cool libraries: Readline (GNU command-line editing),
- Socket (BSD sockets), Regex and MatchPS (GNU regular expressions).
- By Darren Moffat and Sigbjorn Finne.
-
- * New ports -- Linux (a.out) and MIPS (Silicon Graphics).
-
- * NB: configuration has changed yet again -- for the better, of
- course :-)
-
-Please see the release notes for a complete discussion of What's New.
-
-To run this release, you need a machine with 16+MB memory, GNU C
-(`gcc'), and `perl'. We have seen GHC 0.26 work on these platforms:
-alpha-dec-osf2, hppa1.1-hp-hpux9, i386-unknown-linuxaout,
-m68k-sun-sunos4, mips-sgi-irix5, and sparc-sun-{sunos4,solaris2}.
-Similar platforms should work with minimal hacking effort.
-The installer's guide give a full what-ports-work report.
-
-Binaries are now distributed in `bundles', e.g. a "profiling bundle"
-or a "concurrency bundle" for your platform. Just grab the ones you
-need.
-
-Once you have the distribution, please follow the pointers in
-ghc/README to find all of the documentation about this release. NB:
-preserve modification times when un-tarring the files (no `m' option
-for tar, please)!
-
-We run mailing lists for GHC users and bug reports; to subscribe, send
-mail to glasgow-haskell-{users,bugs}-request@dcs.glasgow.ac.uk.
-Please send bug reports to glasgow-haskell-bugs.
-
-Particular thanks to: Jim Mattson (author of much of the code) who has
-now moved to HP in California; and the Turing Institute who donated a
-lot of SGI cycles for the SGI port.
-
-Simon Peyton Jones and Will Partain
-
-Dated: 95/07/24
-
-Relevant URLs on the World-Wide Web:
-
-GHC home page http://www.dcs.glasgow.ac.uk/fp/software/ghc.html
-Glasgow FP group page http://www.dcs.glasgow.ac.uk/fp/
-comp.lang.functional FAQ http://www.cs.nott.ac.uk/Department/Staff/mpj/faq.html
-
-======================================================================
-How to get GHC 0.26:
-
-This release is available by anonymous FTP from the main Haskell
-archive sites, in the directory pub/haskell/glasgow:
-
- ftp.dcs.glasgow.ac.uk (130.209.240.50)
- ftp.cs.chalmers.se (129.16.227.140)
- haskell.cs.yale.edu (128.36.11.43)
-
-The Glasgow site is mirrored by src.doc.ic.ac.uk (146.169.43.1), in
-computing/programming/languages/haskell/glasgow.
-
-These are the available files (.gz files are gzipped) -- some are `on
-demand', ask if you don't see them:
-
-ghc-0.26-src.tar.gz The source distribution; about 3MB.
-
-ghc-0.26.ANNOUNCE This file.
-
-ghc-0.26.{README,RELEASE-NOTES} From the distribution; for those who
- want to peek before FTPing...
-
-ghc-0.26-ps-docs.tar.gz Main GHC documents in PostScript format; in
- case your TeX setup doesn't agree with our
- DVI files...
-
-ghc-0.26-<platform>.tar.gz Basic binary distribution for a particular
- <platform>. Unpack and go: you can compile
- and run Haskell programs with nothing but one
- of these files. NB: does *not* include
- profiling (see below).
-
- <platform> ==> alpha-dec-osf2
- hppa1.1-hp-hpux9
- i386-unknown-linuxaout
- i386-unknown-solaris2
- m68k-sun-sunos4
- mips-sgi-irix5
- sparc-sun-sunos4
- sparc-sun-solaris2
-
-ghc-0.26-<bundle>-<platform>.tar.gz
-
- <platform> ==> as above
- <bundle> ==> prof (profiling)
- conc (concurrent Haskell)
- par (parallel)
- gran (GranSim parallel simulator)
- ticky (`ticky-ticky' counts -- for implementors)
- prof-conc (profiling for "conc[urrent]")
- prof-ticky (ticky for "conc[urrent]")
-
-ghc-0.26-hc-files.tar.gz Basic set of intermediate C (.hc) files for the
- compiler proper, the prelude, and `Hello,
- world'. Used for bootstrapping the system.
- About 4MB.
-
-ghc-0.26-<bundle>-hc-files.tar.gz Further sets of .hc files, for
- building other "bundles", e.g., profiling.
-
-ghc-0.26-hi-files-<blah>.tar.gz Sometimes it's more convenient to
- use a different set of interface files than
- the ones in *-src.tar.gz. (The installation
- guide will advise you of this.)
-
-We could provide diffs from previous versions of GHC, should you
-require them. A full set would be very large (7MB).
diff --git a/ghc/docs/ANNOUNCE-0.27 b/ghc/docs/ANNOUNCE-0.27
deleted file mode 100644
index 843b3e29e6..0000000000
--- a/ghc/docs/ANNOUNCE-0.27
+++ /dev/null
@@ -1,72 +0,0 @@
-A binary-only from-working-sources no-guarantees snapshot of the
-Glasgow Haskell compiler (GHC) for i386-unknown-linuxaout and
-i386-unknown-solaris2 platforms is now available from
-ftp://ftp.dcs.glasgow.ac.uk/pub/haskell/glasgow/ghc-0.27-<platform>.tar.gz.
-(The files ghc-0.26-docs-and-examples.tar.gz and
-ghc-0.26-ps-docs.tar.gz [PostScript] may also be of interest.)
-
-This pseudo-release adds profiling and concurrent-Haskell support for
-i386-*-linuxaout. It is the first GHC that works on i386-*-solaris2
-machines (sequential, profiling, and concurrent support provided).
-
-As 0.27 is a snapshot and not a "proper" release, it may have serious,
-show-stopping bugs in it. If you *need* what 0.27 provides, use it;
-otherwise, you should stick with 0.26.
-
-It should be relatively straightforward to tweak
-ghc/driver/ghc-asm.(l)prl to support Linux ELF format; ditto for other
-Unices on x86 platforms. Please let us know if you make such changes.
-
-GCC 2.7.x is required; GCC 2.6.x will *not* work.
-
-Binaries (.o files and executables) produced by GHC 0.27 cannot be
-intermixed with those from GHC 0.26 or 0.25; you'll need to recompile
-everything.
-
-The -concurrent stuff *definitely* has at least one bug we haven't
-been able to catch. Concurrent programs that show
-readily-reproducible buggy behavior would be most welcome.
-
-The profiling libraries for *solaris2 are huge, for reasons we don't
-understand. If you need to scrap them for space reasons, see the end
-of the installation notes below. Insights into the problem would also
-be most appreciated.
-
-Please report any bugs to glasgow-haskell-bugs@dcs.glasgow.ac.uk.
-
-Will Partain
-AQUA project (slave)
-
-Dated: 95/12/20
-
-=== INSTALLATION NOTES ==============================================
-
-Ignore the installation instructions in any documentation. This is
-the stuff that applies for this distribution.
-
-Unpack the distribution.
-
-Move "ghc-0.27-<platform>" to wherever you like.
-
-Make a link to ghc-0.27-<platform>/ghc/driver/ghc, so that "ghc" will
-be in your PATH.
-
-Change the hardwired paths in ghc/driver/ghc and in
-ghc/utils/hscpp/hscpp to point to where things are on your system.
-(Also: ghc/utils/mkdependHS/mkdependHS, if you want to use it.)
-Notably: where "perl" is (first line of each script), where $TopPwd is
-(ghc script), where your gcc cpp ($OrigCpp) is (hscpp and mkdependHS
-scripts). *Don't* set any environment variables to do this.
-
-GHC should then work. Try "ghc -v" on something simple, to make sure
-it compiles and links a program correctly.
-
-If you don't want the profiling libraries (e.g., to save disk space), do:
-
- cd ghc
- rm runtime/*_p.a lib/*_p.a
-
-If you don't want the concurrent-Haskell libraries (e.g., for the same reason), do:
-
- cd ghc
- rm runtime/*_mc.a lib/*_mc.a
diff --git a/ghc/docs/Jmakefile b/ghc/docs/Jmakefile
index 9e9510cac4..ca56ede272 100644
--- a/ghc/docs/Jmakefile
+++ b/ghc/docs/Jmakefile
@@ -7,15 +7,7 @@
#define NoInstallTargetForSubdirs
#define NoTagTargetForSubdirs
-SUBDIRS = add_to_compiler \
- users_guide \
+SUBDIRS = users_guide \
install_guide \
release_notes \
state_interface
-
-XCOMM developers_guide ?
-XCOMM interfaces ?
-XCOMM pragmas ?
-
-XCOMM grasp_overview ?
-XCOMM style_guide ?
diff --git a/ghc/docs/NOTES.adding-PrimOp b/ghc/docs/NOTES.adding-PrimOp
deleted file mode 100644
index 2d5b475ec1..0000000000
--- a/ghc/docs/NOTES.adding-PrimOp
+++ /dev/null
@@ -1,51 +0,0 @@
-This is a short note describing how I (ADR <areid@dcs.glasgow.ac.uk>)
-added a new primitive operation (makeStablePtr#) to the compiler. It
-serves as documentation of what I did and as a guide to anyone else
-wanting to try it.
-
-1) Change compiler/prelude/PrimOps.lhs:
-
- - add @MakeStablePtrOp@ to the datatype @PrimitiveOp@.
-
- - add the following case to @primOpInfo@
-
- primOpInfo MakeStablePtrOp
- = AlgResult "makeStablePtr#" []
- [(ioWorldTy `UniFun` intPrimAndIoWorldTy), ioWorldTy]
- intPrimAndIoWorldTyCon []
- -- makeStablePtr# :: IO_Int# -> IO_Int#
- -- == makeStablePtr# :: (IoWorld -> (Int#, IoWorld)) -> (IoWorld -> (Int#, IoWorld))
-
-2) Change compiler/prelude/AbsPrel.lhs:
-
- - add @MakeStablePtrOp@ to an appropriate place in @list_of_val_assoc_lists@
-
- (This makes the operation visible to the programmer).
-
- Since this is a glasgow extension, I added it to one of
- @extra_known_vals_2@, @unboxed_ops@, @boxed_ops@. @unboxed_ops@
- is made up of several lists of operators including
- @prim_ops_used_unboxed_only@. By inspection I decided that this
- (@prim_ops_used_unboxed_only@) was the one to go for.
-
-At this point I started recompiling the compiler - this took a long
-time since the change to @PrimitiveOp@ changed the @.hi@ file
-resulting in a complete (or near as makes no odds) recompilation of the
-compiler. (Is there a way of using fastmake that avoids this?)
-
-3) Change imports/StgMacros.lh to generate code for @MakeStablePtr#@
-
- - this is just adding a macro that calls the appropriate operation.
-
- (I suspect I could omit this step if I wanted since all this does
- (ahem, will do) is call a function in the runtime system.)
-
-4) Change runtime/storage/SMap.lc to implement the new operation.
-
- I won't bother describing this just now.
-
-
-This is a little untidy. I should perhaps add a new flag to the system
-which turns my extension off and checks that it is only used in
-conjunction with the Appel generational collector. But we're going to
-do the same to every collector eventually, aren't we?
diff --git a/ghc/docs/NOTES.arbitary-ints b/ghc/docs/NOTES.arbitary-ints
deleted file mode 100644
index 964a2cf5be..0000000000
--- a/ghc/docs/NOTES.arbitary-ints
+++ /dev/null
@@ -1,54 +0,0 @@
-
-Boxed structure of BigInts
-
-
-----> Info1 Pointer
- | Pointer passed to BigNum package
- | |
- \/ \/
- Info2 Size Integer ....
-
- (size excludes info ptr & size field)
-
-Unboxed (Compiler must place on pointer stack not data stack
- Must also tell GC if it is in a register when GC invoked)
-
-----> Info2 Size Integer
-
-
-
-Info1:
- SPEC_INFO_TABLE(Info1, BigNum_entry, 1, 1); (Min Size 2 ?)
-
- Entering this returns the BigNum using agreed return convention
-
-Info2:
- DATA_INFO_TABLE(Info2, Dummy_entry);
-
- This never actually entered -- just required for GC.
-
-------------------------------------------------------------------------------
-
-Boxed structure of BigInts (the alternative one)
-
- Pointer passed to BigNum package
- |
- \/
-----> Info Size Integer ....
-
- (size excludes info ptr & size field)
-
-Unboxed (Compiler must place on pointer stack not data stack
- Must also tell GC if it is in a register when GC invoked)
-
-
-Info:
- DATA_INFO_TABLE(Info, BigNum_entry);
-
- Entering this returns the BigNum using agreed return convention
-
-
-
-Note that the Boxed and Unboxed representations are identical !!!
-
-(unboxing represents evaluationhood, not pointerhood)
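-
-A purely illustrative C sketch of the alternative layout (field names
-assumed, not taken from any actual runtime-system header):
-
-	typedef struct {
-	    const void    *info;	/* info-table pointer (Info above)           */
-	    unsigned long  size;	/* words of integer, excluding these fields  */
-	    unsigned long  digits[1];	/* the integer proper, 'size' words         */
-	} BigNum;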
diff --git a/ghc/docs/NOTES.c-optimisation b/ghc/docs/NOTES.c-optimisation
deleted file mode 100644
index 3320ae1d3d..0000000000
--- a/ghc/docs/NOTES.c-optimisation
+++ /dev/null
@@ -1,2361 +0,0 @@
-Optimisation of C-code (runtime and compiled)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- Placing of STG registers in machine registers
-- Optimisation of interpreter loop (tail calls)
-
-/* TODO: flags */
--OC flag to ghc causes optimisation
-
-
-ANSI C
-~~~~~~
-For functions with no args we declare them as
-
- foo( STG_NO_ARGS )
-
-rather than foo(), because you can tell ANSI C a little more by making
-STG_NO_ARGS expand to void: foo(void) is a full prototype saying that foo
-takes no arguments, whereas plain foo() leaves the arguments unspecified
-and the calls unchecked.
-
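-A minimal sketch of the difference, assuming STG_NO_ARGS is simply defined
-to be void (as suggested above):
-
-	#define STG_NO_ARGS void
-
-	extern int foo( STG_NO_ARGS );	/* full prototype: foo takes no
-					   arguments, so foo(42) is rejected */
-	extern int bar();		/* arguments unspecified: bar(42)
-					   slips past the compiler */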
-
-Optimisation with GCC
-~~~~~~~~~~~~~~~~~~~~~
-
-We are concentrating most optimisation with gcc which allows
-suitable global register declarations.
-
-
-REGISTERS:
-
-See StgOpt.h for a description of register usage
-
-Note that all modules that reference the STG registers must be
-compiled the same way so they look at the registers and not the
-global variables.
-
-
-TAIL CALLS:
-
-Separate modules are required for the tail-call optimisation; the runtime
-system code has to be partitioned accordingly.
-
-.hc:
- Modules with tail call routines (most runtime and all compiled)
- are labeled .hc (literate = .lhc).
-    These are compiled to assembler with tail call optimisation on
- and then post processed with sed (Yuk!)
-
- All routines which return a continuation (STGJUMP) must be
- compiled this way.
-
-    (The only exception to this is continuations which exit/abort, which
-    may live in .c files)
-
-.c:
- These modules are not compiled with the tail call optimisation
- and don't have sed processing.
- Sed processing would destroy the code!
-
- All routines which are not continuations (called and return
- conventionally) must be compiled this way.
-
- This includes various parts of the runtime system.
-
-
-
-
-See Also ~sansom/work/gcstats/OBSERVATIONS
-
-
-
-
-Info received from Eliot Miranda:
-
-Received: from dcs.glasgow.ac.uk (tutuila) by vanuata.dcs.glasgow.ac.uk; Thu, 4 Jul 91 09:40:34 BST
-Message-Id: <15456.9107040841@dcs.glasgow.ac.uk>
-X-Comment1: #############################################################
-X-Comment2: # uk.ac.glasgow.cs has changed to uk.ac.glasgow.dcs #
-X-Comment3: # If this address does not work please ask your mail #
-X-Comment4: # administrator to update your NRS & mailer tables. #
-X-Comment5: #############################################################
-To: simonpj
-Cc: sansom
-Subject: Miranda's original msg
-Date: Thu, 04 Jul 91 09:41:19 +0100
-From: Simon L Peyton Jones <simonpj>
-
-
-
-
-
->From eliot@.cs.qmw.ac.uk Mon Apr 1 11:16:06 1991
-From: eliot@.cs.qmw.ac.uk (Eliot Miranda)
-Newsgroups: comp.compilers
-Subject: Portable Fast Direct Threaded Code
-Keywords: interpreter, design
-Date: 28 Mar 91 12:20:06 GMT
-Reply-To: Eliot Miranda <eliot@.cs.qmw.ac.uk>
-Organization: Computer Science Dept, QMW, University of London, UK.
-
-Various people have asked me for details on how I do threaded code
-in my dynamic translation Smalltalk-80 VM. So here's the gory details
-as well as the first published benchmarks for the system.
-
- How to do "Machine-Independent" Fast Direct Threaded Code:
-
-
-First off, use C (although other flexible machine-oriented imperative
-languages would probably work too).
-
-Global registers:
- If you can use GCC >v1.33 you can use global register variables to
-hold the threaded code machine's registers. If you have various forms of
-stupid C compiler then you can get global register variables by declaring
-your globals as register variables in every function, and later editing the
-assembler generated by the C compiler to remove global register saves &
-restores (details in [Miranda]).
-
-
-Threaded Code:
- Threaded code instructions (TCIs) are written as C procedures.
-They are compiled to assembler by the C compiler. Subsequently a simple
-editor script is run on the assembler to remove the prologues and epilogues
-from the threaded code procedures, and possibly to insert direct threaded
-code jumps.
-
-How to Identify Threaded Code Instructions:
- The method I prefer is to segregate TCIs from other procedures &
-functions in the machine by placing related groups of TCIs in separate
-source files. I give my threaded code source files the .tc extension and
-they have a rule in the makefile that will run the editor script on the
-assembler. An alternative is to identify each threaded code procedure with
-a special prefix which is spotted by the editor script. This is probably
-more error prone & doesn't fit well with leaf-routine optimization (see
-below).
-
-How to Write Threaded Code Instructions:
-Each TCI is written as a void function of no arguments. It is wise to start
-and end each TCI with two special macros to replace '{' and '}'. So, using
-GCC on the SPARC, given some declarations:
-
-
- typedef void (*TCODE)(); /* a TCODE is a pointer to a function */
- typedef ???? ObjectPointer;
-
- . . .
-
- register TCODE *tcip asm("%g7"); /*threaded code instruction pointer*/
- register ObjectPointer *stackPointer asm("%g5");
-
-e.g. popStack would be written:
-
- void popStack()
- TBEGIN
- stackPointer--;
- TEND
-
-With GCC TBEGIN is
-
- #define TBEGIN {
-
-With stupid C compilers it can be defined to be the list of global register
-variables. Further, if you want to build a debugger for your threaded code
-machine you could compile the system with
-
- #define TBEGIN { int frig = checkForBreakPoint();
-
-and ignore lots of warnings about variable frig being unused :-).
-
-TEND has to do a direct threaded code jump. In my system I want an indirect
-post-increment jump on tcip; i.e. jump to *tcip++. On the SPARC with tcip
-in %g7 the jump is
-
- ld [%g7],%o0 ! get *tcip
- jmpl %o0,%g0 ! jump to it
- add %g7,4,%g7 ! increment tcip in the jump's delay slot
-
-On the 68k with tcip in a5 the jump is
-
- movl a5@+,a0
- jmp a0@
-
-With GCC this is implemented by the JUMPNEXT macro. On the SPARC:
- #define JUMPNEXT do{ \
- asm("ld [%g7],%o0; jmpl %o0,%g0; add %g7,4,%g7");\
- return;}while(0)
-
-Note the return; it tells the compiler that control does not pass this point.
-On the 68k:
- /* SBD = Silent But Deadly = Stack Bug Dummy. gcc has a bug with
-	   no-defer-pop. It always defers the pop of the last function call in
- a routine. SBD is a dummy call to ensure no other previous call gets
- its pop deferred.
- */
- extern void SBD P((void));
-
- #define JUMPNEXT do{ \
- asm("movl a5@+,a0; jmp a0@");\
- SBD();return;}while(0)
-
-SBD is then removed by the editor script.
-
-So TEND is defined to be
- #define TEND JUMPNEXT; }
-
-On the SPARC popStack is expanded to
- void popStack()
- {
- stackPointer--;
- do{asm("ld [%g7],%o0; jmpl %o0,%g0; add
-%g7,4,%g7");return;}while(0);
- }
-
-It's compiled to:
- _popStack:
- !#PROLOGUE# 0
- save %sp,-80,%sp
- !#PROLOGUE# 1
- add %g5,-4,%g5
- ld [%g7],%o0; jmpl %o0,%g0; add %g7,4,%g7
- ret
- restore
-The editor script then reduces this to:
- _popStack:
- ! [gotcher]
- add %g5,-4,%g5
- ld [%g7],%o0; jmpl %o0,%g0; add %g7,4,%g7
-
-On the 68k you end up with:
- .globl _popStack
- _popStack:
- subqw #4,a3
- movl a5@+,a0; jmp a0@
-
-Global Register Variables and Stupid C Compilers:
- Some C compilers are stupid enough to provide straight-forward global
-registers. A C compiler on a nat-semi based system I used just allocated
-registers in the order they were declared. The assembler syntax was very
-simple to edit. Global register variables could thus be obtained easily.
-
- Some C compilers are stupid but think they're clever. Sun's SUN3
-compiler is a case in point. It also allocates registers in the order declared.
-However, it tries to be clever by removing 'dead assignments' (assignments to
-subsequently unused register variables). These compilers are easy to fool.
-Simply arrange that the register variables are always used before leaving a
-function. I do this by always writing RETURNV or RETURN(e) instead of
-return and return e. On systems with such stupid C compilers RETURN(e)
-is defined thus:
-
-	#define RETURN(e) do{DummyUseRegs(GR1,GR2,GR3); return e;}while(0)
-
-The call on DummyUseRegs fools the compiler into thinking the registers
-are live & hence saves assignments to them. The editor scripts can then
-remove calls on DummyUseRegs.
-
-	Of course on systems with marginally clever C compilers (SUN4,
-HP-UX etc)
-you're stuffed. However, in clever C compilers like GCC and Acorn's C compiler
-you can declare global registers & everything is clean & wholesome :-).
-
-
-
-Conditional TCODE Jumps:
-	Say we wanted a conditional tcode jump. This might be written:
- void skipIfTrue()
- TBEGIN
- if (*stackPointer-- == TrueObject) {
- tcip += 1;
- JUMPNEXT;
- }
- TEND
-
-How this All Works:
-With the above scheme, each threaded code procedure runs in the same C
-stack frame, and jumps directly to the next procedure, eliminating an
-unnecessary <epilogue, return>, <call, prolog> pair. Once we establish a
-stack frame and call the first function, away we go. Assuming that you've
-produced your first threaded code method (a sequence of pointers to
-threaded code procedures), and that tcip points at the start, then
-StartTCMachine might be defined as follows:
-
- volatile void
- StartTCMachine()
- { char *enoughSpaceForAllTCIStackFrames;
-
- enoughSpaceForAllTCIStackFrames = alloca(1024);
-
- while(1) { (*tcip++)(); }
- }
-
-The alloca allocates space on the stack. After the first (*tcip++)()
-control goes off into threaded code land and never returns.
-
-Leaf Routine Optimization:
-The threaded code machine will make calls on support routines e.g.
-graphics, garbage collector etc. Any group of routines that don't access
-the global registers and don't directly or indirectly call routines that
-need to access the global registers can be optimized. These routines
-should be compiled without declaring the global registers. These routines
-will then use as many registers as the compiler will give them and will
-save & restore any registers they use, preserving the values of the global
-register variables.
-
-
-Details of my Smalltalk Threaded Code Machine:
- I use a pair of words for each TCI, a pointer to the procedure followed
-by an optional operand. This avoids going out of line to access arguments.
-e.g. pushLiteral is:
- void pushLit()
- TBEGIN
- *++stackPointer = (OOP)*tcip++;
- TEND
-where OOP is an ordinary object pointer. So on entry to push lit we have:
- <pointer to pushLit>
- tcip-> <object pointer>
- <pointer to next TCI>
- <next TCI's operand>
-and popStack must therefore be written
- void popStack()
- TBEGIN
- stackPointer--;
- tcip++;
- TEND
-
-I dynamically compile Smalltalk-80 bytecodes to threaded code. I use 128k
-bytes of memory to hold all threaded code. This 'tspace' is periodically
-scavenged to reclaim space. The architecture is similar to
-[DeutschSchiffman]. Using an eighth of the space used by the Deutsch-Schiffman
-machine I get around 75% of the performance on the non-graphics
-benchmarks. Here are the Smalltalk macro benchmarks for BrouHaHa
-Smalltalk-80 v2.3.2t running on a monochrome SUN3/60 (20MHz 68020):
-
- BitBLT 76.7308
- TextScanning 222.857
- ClassOrganizer 80.6667
- PrintDefinition 59.0278
- PrintHierachy 142.857
- AllCallsOn 112.5
- AllImplementors 130.0
- Inspect 116.667
- Compiler 86.4341
- Decompiler 101.639
- KeyboardLookAhead 212.5
- KeyboardSingle 302.778
- TextDisplay 148.837
- TextFormatting 273.81
- TextEditing 180.342
- Performance Rating 134.198
-
-and on the same machine under the same conditions are the timings for
-ParcPlace Smalltalk-80 V2.3:
-
- BitBLT 99.75
- TextScanning 390.0
- ClassOrganizer 155.128
- PrintDefinition 137.097
- PrintHierachy 192.308
- AllCallsOn 120.0
- AllImplementors 108.333
- Inspect 146.774
- Compiler 118.617
- Decompiler 129.167
- KeyboardLookAhead 303.571
- KeyboardSingle 473.913
- TextDisplay 172.973
- TextFormatting 442.308
- TextEditing 285.135
- Performance Rating 189.504
-
-134.198/189.504 = 0.708154
-
-WARNING!! These systems ARE different; these benchmarks are included only
-to give a feeling for ball-park performance.
-Example differences:
- BrouHaHa ParcPlace
- closures blue-book BlockContexts
- immediates:
- characters, smallints, fixedpoints immediate smallintegers
- 5844 compiled methods 5136 compiled methods
- (5026 ordinary methods) (4798 ordinary methods)
- (818 quick methods) (338 quick methods)
-
-
-
-More Portable File Organization:
-To keep the code as clean looking as possible all machine-dependencies are
-isolated in separate files. e.g. tcode.h gives machine independent
-definitions for TCODE. It includes machine dependencies from another file:
-
- /* for debugging purposes; single step breakpoint at start of
-each tcode
- */
- #define DEBUG_FETCH_BREAK int frig = fetchBrk();
-
- #ifdef FAST
- # include "fasttcode.h"
- #else
-
- TCODE *tcip; /* the tcode ip points at TCODEs */
-
- # define JUMPNEXT return
- # ifdef BIDebug
- # define TBEGIN { DEBUG_FETCH_BREAK
- # else
- # define TBEGIN {
- # endif
- # define TEND }
- #endif
-
-GCC/SPARC/fasttcode.h:
- /* tcodeip in g7 */
- register TCODE *tcip asm("%g7");
-
- #define JUMPNEXT do{asm("ld [%g7],%o0; jmpl %o0,%g0; add
-%g7,4,%g7");return;}while(0)
-
- #ifdef BIDebug
- # define TBEGIN { DEBUG_FETCH_BREAK
- #else
- # define TBEGIN {
- #endif
- #define TEND JUMPNEXT; }
-
-I also don't want to include the necessary definitions for the global registers
-in every file. So for those non-leaf routines that must avoid using the
-global registers there's a fastglobal.h file that gives dummy definitions for
-these registers. e.g. GCC/SPARC/fastglobal.h:
- /* machine specific FAST defines.
- Gnu C 1.33 systems can use nice compiler provided global registers.
- */
-
- #define BEGIN {
- #define END }
- #define RETURN(e) return e
- #define RETURNV return
-
- register char *GlobRegDummy1 asm("a5");
- register char *GlobRegDummy2 asm("a4");
- register char *GlobRegDummy3 asm("a3");
- register char *GlobRegDummy4 asm("d6");
-
- #ifdef MASKREGISTER
- register char *GlobRegDummy5 asm("d7");
- #endif
-
-I use symbolic links to set up the machine dependent include files.
-This has the
-advantage that if you add a new machine you don't have to remake all
-the others.
-
-
-The Tedious Bit:
-The only tedious bit is writing the sed-scripts. For the SPARC this took 1 day.
-Here are the sed scripts I use for SUN 3, MAC2AUX (using GAS) and SUN4,
-all using GCC (v1.33 upwards). There's a problem on the SPARC in that the ABI
-does not seem to define the status of the global registers. Some math and
-library routines stomp on the global registers (beware getwd!), so I've
-included
-GCC/SUN4/sed.globreg.bugfix as an example of how to spot the offending math
-routines:
-
-SUN3/GCC/lib/sed.tcode.opt:
-# script to strip prolog & epilog from threaded code under gcc.
-# WARNING the script may strip a push of a register argument if a call is the
-# first statement of a function!!
-#
-/^_.*:$/{n
-N
-N
-s/ link a6,#[^\n]*\n//
-/ fmovem #[^\n]*,sp@-/{
-N
-s/ fmovem #[^\n]*,sp@-\n//
-}
-s/ moveml .*,sp@-\n//
-s/ movel [ad][0-7],sp@-\n//
-}
-/ moveml a6@(-[1-9][0-9]*),#/{N
-s/ moveml a6@(-[1-9][0-9]*),#[^\n]*\n unlk a6//
-}
-/ movel a6@(-[1-9][0-9]*),[ad][0-7]/{N
-s/ movel a6@(-[1-9][0-9]*),[ad][0-7]\n unlk a6//
-}
-/ movel sp@+,/d
-/ moveml sp@+,#/d
-/ unlk a6/d
-/ rts/d
-/ jbsr _SBD/d
-
-MAC2AUX/GCC/lib.gas/sed.tcode.opt:
-/COMMENT/{
-i\
- script to strip prolog & epilog from threaded code under gcc. WARNING \
- the script may strip a push of a register argument if a call is the\
- first statement of a function!!
-}
-/^gcc_compiled:/d
-/^.[^%].*:$/{ n
-/ link %a6/{
-N
-N
-s/ link %a6,#[x0-9-]*\n//
-/ fmovem #[^\n]*,%sp@-/{
-N
-s/ fmovem #[^\n]*,%sp@-\n//
-}
-s/ moveml #[x0-9a-f]*,%sp@-\n//
-s/ movel %[ad][0-7],%sp@-\n//
-n
-}
-}
-/ moveml -[1-9][0-9]*%a6@,#/{ N
-s/ moveml -[1-9][0-9]*%a6@,#[x0-9a-f-]*\n unlk %a6//
-}
-/ movel -[1-9][0-9]*%a6@,%[ad][0-7]/{ N
-s/ movel -[1-9][0-9]*%a6@,%[ad][0-7]\n unlk %a6//
-}
-/ movel %sp@+,%/d
-/ moveml %sp@+,#/d
-/ movel %d0,%a0/{
-N
-s/ movel %d0,%a0\n unlk %a6//
-/ movem*l %a6/{
-N
-s/ movel %d0,%a0\n movem*l %a6.*\n unlk %a6//
-/ fmovem %a6/{
-N
-s/ movel %d0,%a0\n movem*l %a6.*\n fmovem %a6.*\n unlk %a6//
-}
-}
-}
-/ unlk %a6/d
-/ rts/d
-/ jbsr SBD/d
-
-
-SUN4/GCC/lib/sed.tcode.opt:
-# script to strip prolog & epilog from threaded code under gcc.
-#
-/^_.*:$/{n
-N
-N
-s/ !#PROLOGUE# 0\n save %sp,[-0-9]*,%sp\n !#PROLOGUE# 1/ ! [gotcher]/
-}
-/ ret/d
-/ restore/d
-
-SUN4/GCC/lib/sed.globreg.bugfix:
-# Some of the libc builtin routines (.rem .urem .div & .udiv so far known)
-# stamp on %g3 which is the maskReg (it contains 0x7FFFFF).
-# This script reassigns the value of maskReg after each of these routines
-# has been called.
-/call[ ]\.div,[0-9]/{n
-n
-i\
- sethi %hi(0x7FFFFF),%g3 ![globregfix]\
- or %lo(0x7FFFFF),%g3,%g3
-}
-/call[ ]\.udiv,[0-9]/{n
-n
-i\
- sethi %hi(0x7FFFFF),%g3 ![globregfix]\
- or %lo(0x7FFFFF),%g3,%g3
-}
-/call[ ]\.rem,[0-9]/{n
-n
-i\
- sethi %hi(0x7FFFFF),%g3 ![globregfix]\
- or %lo(0x7FFFFF),%g3,%g3
-}
-/call[ ]\.urem,[0-9]/{n
-n
-i\
- sethi %hi(0x7FFFFF),%g3 ![globregfix]\
- or %lo(0x7FFFFF),%g3,%g3
-}
-
-
-You can now see why I put "Machine-Independent" in quotes. Here's the count
-of machine dependent code for the SPARC:
-
- 25 99 786 fastcache.h
- 68 262 1882 fastglobal.h
- 31 112 906 fasttcode.h
- 28 80 595 ../tcsrc/SUN4/GCC/lib/sed.globreg.bugfix
- 5 34 222 ../tcsrc/SUN4/GCC/lib/sed.peep.opt
- 9 30 173 ../tcsrc/SUN4/GCC/lib/sed.tcode.opt
- 166 617 4564 total
-
-Of these 166 lines, 51 are banner headers and 100-odd lines are
-machine dependent. A whole VM is around the 20k lines mark. So
-machine dependencies are down in the 0.5% range.
-
-
-
-Use this stuff as part of whatever you like. If you try & assert ownership
-I'll fight & batter you over the head with the GPL ('bout time we had some
-serious steel in that thar GPL).
-
-Share And Enjoy!
-
-P.S. The BrouHaHa machine is available to educational institutions with a
-valid ParcPlace Smalltalk-80 licence, subject to a strict non-disclosure
-agreement. email me if you want it. I am slow to answer requests!
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From crowl@cs.rochester.edu Tue Apr 2 10:34:53 1991
-From: crowl@cs.rochester.edu (Lawrence Crowl)
-Newsgroups: comp.compilers
-Subject: Re: Portable Fast Direct Threaded Code
-Keywords: interpreter, design
-Date: 31 Mar 91 18:06:35 GMT
-Reply-To: crowl@cs.rochester.edu (Lawrence Crowl)
-Organization: Computer Science Department University of Rochester
-
-In article <3035@redstar.cs.qmw.ac.uk>
-Eliot Miranda <eliot@.cs.qmw.ac.uk> writes:
->The threaded code machine will make calls on support routines, e.g. graphics,
->garbage collector etc. Any group of routines that don't access the global
->registers and don't directly or indirectly call routines that need to access
->the global registers can be optimized. These routines should be compiled
->without declaring the global registers. These routines will then use as many
->registers as the compiler will give them and will save & restore any
->registers they use, preserving the values of the global register variables.
-
-This scheme assumes that procedure calls use a "callee saves" register
-convention, and does not work if you allocate the global register
-variables out of the "caller saves" set of registers. The problem is that
-the caller does not save the register (because it is global) and the
-callee writes over the register (because it assumes the caller saved it).
-In this situation, the programmer must insert explicit saves and restores of the
-global register variables.
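-
-A purely illustrative sketch (register choice, variable and function names
-all assumed) of such a hand-inserted save/restore around a library call
-that may clobber a caller-saves global register:
-
-	#include <strings.h>			/* for bcopy() */
-
-	register char *heapPtr asm("%g2");	/* assumed caller-saves global */
-
-	void copy_block(char *src, char *dst, int n)
-	{
-	    char *saved = heapPtr;		/* bcopy may clobber %g2 ...   */
-	    bcopy(src, dst, (unsigned) n);
-	    heapPtr = saved;			/* ... so put the global back  */
-	}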
-
-The obvious solution to this problem is to allocate all global register
-variables out of the "callee saves" set of registers. However, the
-Alliant has _no_ callee saves registers. Library routines on the Alliant
-will trash every register you have. In addition, implicit library calls
-to routines like bcopy will also trash your registers. (I learned this
-the hard way.)
-
-The lesson is that calling library routines with global register variables in
-caller saves registers requires special handling. It is not automagic.
---
- Lawrence Crowl 716-275-9499 University of Rochester
- crowl@cs.rochester.edu Computer Science Department
- ...!rutgers!rochester!crowl Rochester, New York, 14627-0226
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From Tom.Lane@G.GP.CS.CMU.EDU Wed Apr 3 10:38:09 1991
-From: Tom.Lane@G.GP.CS.CMU.EDU
-Newsgroups: comp.compilers
-Subject: Re: Portable Fast Direct Threaded Code
-Keywords: interpreter, design
-Date: 1 Apr 91 15:21:14 GMT
-Reply-To: Tom.Lane@G.GP.CS.CMU.EDU
-Organization: Compilers Central
-
-Lawrence Crowl points out one important problem with Eliot Miranda's
-scheme for global register use in C. There's an even more fundamental
-problem, though: there is *no guarantee whatever* that the compiler will
-assign the same registers to the global variables in every routine.
-
-Compilers that do intelligent allocation of variables to registers may
-refuse to honor the "register" declarations at all if the global variables
-are not heavily used in a given routine, and in any case the globals need
-not be assigned to the same registers every time. Miranda's scheme thus
-relies heavily on the assumption of a dumb register allocator (or else a
-compiler that supports global register variable declarations).
-
-This scheme may be "fast" direct threaded code, but it's hardly "portable".
---
- tom lane
-Internet: tgl@cs.cmu.edu BITNET: tgl%cs.cmu.edu@cmuccvma
-[GCC lets you specify what register to use for global register variables, but
-that is of course both machine and compiler specific. -John]
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From pardo@cs.washington.edu Thu Apr 4 17:34:39 1991
-From: pardo@cs.washington.edu (David Keppel)
-Newsgroups: comp.compilers
-Subject: Re: Portable Fast Direct Threaded Code
-Keywords: interpreter, design, bibliography
-Date: 2 Apr 91 19:21:25 GMT
-Reply-To: pardo@cs.washington.edu (David Keppel)
-Organization: Computer Science & Engineering, U. of Washington, Seattle
-
-metzger@watson.ibm.com (Perry E. Metzger) writes:
->[I'd like a reference on threaded code interpreters.]
-
-3 citations follow:
-
-%A James R. Bell
-%T Threaded Code
-%J Communications of the ACM (CACM)
-%V 16
-%N 2
-%D June 1973
-%P 370-372
-%X Describes the basic idea of threaded code.
-Compares to hard code (subroutine calls) and interpreters.
-
-%A Richard H. Eckhouse Jr.
-%A L. Robert Morris
-%T Minicomputer Systems Organization, Programming, and Applications
-(PDP-11). 2nd Ed.
-%I Prentice-Hall, Inc.
-%P 290-298
-%X Describes threaded code and ``knotted code''. I (pardo) think that
-this is a very clear introduction to threaded code.
-
-%A Peter M. Kogge
-%T An Architectural Trail to Threaded Code Systems
-%J IEEE Computer
-%P 22-33
-%D March 1982
-%W rrh (original)
-%W Pardo (copy)
-%X Describes how to build a threaded code interpreter/compiler from
-scratch.
- * Direct threaded/indirect threaded.
- * Less than 2:1 performance hit of threading compared to full
-compilation.
- * Note about bad compilers contributing to relative OK-ness of
-threaded code.
- * Ease of rewriting stuff.
- * Ease of tuning.
-
-My favorite of the three is Eckhouse & Morris; however I don't know
-where to get it. The pages that I have are photocopies graciously
-sent to me by a friend. As the title implies, this book is several
-years old and undoubtedly out-of-print.
-
- ;-D on ( Following this thread of the discussion... ) Pardo
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From simonpj Fri Apr 5 09:52:33 1991
-Received: from tutuila.dcs.glasgow.ac.uk by vanuata.cs.glasgow.ac.uk;
-Fri, 5 Apr 91 09:52:27 BST
-Message-Id: <2763.9104050851@tutuila.dcs.glasgow.ac.uk>
-X-Comment1: #############################################################
-X-Comment2: # uk.ac.glasgow.cs has changed to uk.ac.glasgow.dcs #
-X-Comment3: # If this address does not work please ask your mail #
-X-Comment4: # administrator to update your NRS & mailer tables. #
-X-Comment5: #############################################################
-From: Simon L Peyton Jones <simonpj>
-To: eliot@cs.qmw.ac.uk
-Cc: simonpj, partain
-Subject: Threaded code
-Date: Fri, 05 Apr 91 09:51:48 +0100
-
-
-Eliot
-
-I saw your article about threaded code. Like you and others, we are
-using C as an assembler, only for a pure functional language, Haskell.
-I have some brief questions.
-
-1. By telling GCC not to use a frame pointer, one can eliminate
-the prolog entirely, can't one? So why edit it out?
-
-I guess the answer is going to be local variables, allocated once for
-all by the StartTCMachine routine. Still, it seems quite a pain. I guess
-one could sacrifice some (perhaps slight) speed by using a bunch of
-globals instead.
-
-2. You edit out the epilogue for tidiness only, I take it. It doesn't
-cause any problems if you leave it in, does it?
-
-3. Why do you use no-defer-pop (with the associated bug)?
-
-4. Why does JUMPNEXT have a loop? Surely the jump leaves the loop right
-away. Presumably you are tricking the compiler somehow.
-
-Thanks
-
-Simon L Peyton Jones
-Glasgow University
-
-
-
-
-Simon
-============================= Address change =======================
-My email address is now officially: simonpj@dcs.glasgow.ac.uk
-This may fail if your local site has out-of-date mail tables.
-The old address (simonpj@cs.glasgow.ac.uk) will work for quite a long while,
-so stay with the old one if the new one fails.
-====================================================================
-
->From eliot@cs.qmw.ac.uk Fri Apr 5 12:18:18 1991
-Via: uk.ac.qmw.cs; Fri, 5 Apr 91 12:18:06 BST
-Received: from aux47 by redstar.cs.qmw.ac.uk id aa26828; 5 Apr 91 12:17 BST
-Reply-To: eliot@cs.qmw.ac.uk
-In-Reply-To: Simon L Peyton Jones's mail message
-<2763.9104050851@tutuila.dcs.glasgow.ac.uk>
-Message-Id: <9104051217.aa26828@uk.ac.qmw.cs.redstar>
-From: Eliot Miranda <eliot@cs.qmw.ac.uk>
-To: simonpj
-Cc: partain
-Subject: re: Threaded code
-Date: Fri, 5 Apr 91 10:54:25 BST
-
->
->Eliot
->
->I saw your article about threaded code. Like you and others, we are
->using C as an assembler, only for a pure functional language, Haskell.
->I have some brief questions.
->
->1. By telling GCC not to use a frame pointer, one can eliminate
->the prolog entirely, can't one? So why edit it out?
-No, registers local to the procedure will still be saved & stack space
-allocated for automatic variables. This IS a problem since the
-threaded-code jump at the end of the procedure will miss the register
-restores before the epilogue. Consequently the stack will grow unless
-these register saves & stack-space allocations are removed.
->
->I guess the answer is going to be local variables, allocated once for
->all by the StartTCMachine routine. Still, it seems quite a pain. I guess
->one could sacrifice some (perhaps slight) speed by using a bunch of
->globals instead.
-For certain routines, not using register variables will be expensive
-(e.g. a simple integer arithmetic primitive).
->
->2. You edit out the epilogue for tidiness only, I take it. It doesn't
->cause any problems if you leave it in, does it?
-No, but given that the prologue has to be removed & removing the epilogue
-is as easy (& given finite memory :-) one might as well remove it.
->
->3. Why do you use no-defer-pop (with the associated bug)?
-OK. This is again to avoid stack growth. On conventional stack architectures
-gcc will try & combine the argument popping code of a sequence of
-procedure calls.
-e.g.
-extern long a, b, c;
-void doit() {
- foo(a); bar(b); baz(c);
-}
-
-with -O -fno-defer-pop one might expect gcc to generate
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
- addqw #4,%sp
- movel c,%sp@-
- jbsr baz
- addqw #4,%sp
- unlk %a6
- rts
-
-but because gcc knows that the unlk instruction will roll back the stack
-in fact gcc generates:
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
- addqw #4,%sp
- movel c,%sp@-
- jbsr baz
- unlk %a6
- rts
-
-With -O -fdefer-pop gcc optimizes out the pops completely & generates:
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
- movel c,%sp@-
- jbsr baz
- unlk %a6
- rts
-
-with -O -fomit-frame-pointer -fdefer-pop gcc generates:
-
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
- movel c,%sp@-
- jbsr baz
- addw #12,%sp
- rts
-
-& with -O -fomit-frame-pointer -fno-defer-pop gcc generates:
-
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
- addqw #4,%sp
- movel c,%sp@-
- jbsr baz
- addqw #4,%sp
- rts
-
-All the above cases are as one would wish. The elimination of all
-deferred pops in the unlk instruction is especially clever.
-
-However, in the presence of the threaded-code jump the waters darken!
-Consider what gcc generates for:
-
- register void (**tcip)() asm("%a5");
-
- #define JUMPNEXT do{asm("movl %a5@+,%a0; jmp %a0@");return;}while(0)
-
- extern long a, b;
- void doit() {
- foo(a); bar(b); JUMPNEXT;
- }
-with -O -fdefer-pop gcc generates
-
-doit:
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- unlk %a6
- rts
-
-This is clearly no good because the arguments to both foo & bar
-will never be popped. Every time doit() is executed the stack will grow
-by 8 bytes. Soon your program will dump core with a very large stack
-segment!
-
-with -O -fno-defer-pop gcc generates:
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- unlk %a6
- rts
-
-Again useless because bar's pop has been folded into the unlk
-which won't be executed.
-
-with -O -fdefer-pop -fomit-frame-pointer gcc generates
-
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
- addqw #8,%sp
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- rts
-
-This is great. However, not all functions are susceptible to
-the omit-frame-pointer optimization (i.e. functions
-with local variables). E.g. the code generated for:
-
- register void (**tcip)() asm("%a5");
-
- #define JUMPNEXT do{asm("movl %a5@+,%a0; jmp %a0@");return;}while(0)
-
- extern long a, b;
- void doit() {
- char large[1024];
- foo(a,large); bar(b); JUMPNEXT;
- }
-
-with -O -fomit-frame-pointer -fdefer-pop is:
-
- link %a6,#-1024
- pea %a6@(-1024)
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- unlk %a6
- rts
-
-so in general one can't rely on -fomit-frame-pointer.
-For the above example both
- -O -fomit-frame-pointer -fno-defer-pop
-and
- -O -fno-defer-pop
-generate:
-
-doit:
- link %a6,#-1024
- pea %a6@(-1024)
- movel a,%sp@-
- jbsr foo
- addqw #8,%sp
- movel b,%sp@-
- jbsr bar
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- unlk %a6
- rts
-
-This is also useless because bar's argument pop has been folded away.
-The problem is that gcc will always fold away the last call's argument
-pop if the function has a frame pointer, and -fomit-frame-pointer
-can't always get rid of the frame pointer. In fact, in the presence
-of variable sized automatic variables or calls on alloca it would be
-very hard (impossible for recursive functions?) to do.
-
-The neatest solution I've come up with is to use -fno-defer-pop
-and a dummy function call between the threaded-code jump and
-the return:
-
- register void (**tcip)() asm("%a5");
-
- #define JUMPNEXT do{asm("movl %a5@+,%a0; jmp
-%a0@");SBD();return;}while(0)
-
- extern long a, b;
- void doit() {
- foo(a); bar(b); JUMPNEXT;
- }
-with -O -fno-defer-pop gcc generates:
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
- addqw #4,%sp
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- jbsr SBD
- unlk %a6
- rts
-
-Now bar's argument pop is not folded because it's no longer the last
-call in the routine, SBD is.
-So the call to SBD
- a) prevents gcc's 'last call argument pop fold into unlk' optimization
- which prevents uncontrolled stack growth.
- b) doesn't get executed because of the jump
- c) is trivial to remove from the assembler with a sed-script
-
-
->4. Why does JUMPNEXT have a loop? Surely the jump leaves the loop right
->away. Presumably you are tricking the compiler somehow.
->
-This is old C lore. The problem is
- 'How do you write a macro that is a sequence of statements
- that can be used wherever a single statement can?'
-
-take the following definition of JUMPNEXT:
-#define JUMPNEXT asm("movl %a5@+,%a0; jmp %a0@");return;
-
-Now invoke it here:
- if (its_time_to_jump)
- JUMPNEXT;
- do_something_else();
-
-This expands to:
- if (its_time_to_jump)
- asm("movl %a5@+,%a0; jmp %a0@");
- return;
- do_something_else();
-
-Not at all what's intended!
-
-There are two tricks I know of (the first I saw in Berkeley Smalltalk,
-the second in Richard Stallman's gcc manual. I expect they're both
-quite old).
-The first is to surround your statements with
-if (TRUE){statements}else
-i.e.
-#define JUMPNEXT if(1){asm("movl %a5@+,%a0; jmp %a0@");return;}else
-So now we get:
- if (its_time_to_jump)
- if (1){
- asm("movl %a5@+,%a0; jmp %a0@");
- return;
-	}else;
- do_something_else();
-
-which works because C binds elses innermost first. However, some
-compilers will whine about dangling elses. The second scheme is
-more elegant (-:
-
-Surround your statements with
-do{statements}while(FALSE);
-which will execute statements precisely once (it's NOT a loop).
-i.e.
-#define JUMPNEXT do{asm("movl %a5@+,%a0; jmp %a0@");SBD();return;}while(0)
-expands to
-
- if (its_time_to_jump)
- do {
-	    asm("movl %a5@+,%a0; jmp %a0@");
-	    SBD();
-	    return;
-	} while(0);
- do_something_else();
-
-which does what's wanted and doesn't incur compiler whines.
-
-
->Thanks
->
->Simon L Peyton Jones
->Glasgow University
-
-Eliot Miranda email: eliot@cs.qmw.ac.uk
-Dept of Computer Science Tel: 071 975 5229 (+44 71 975 5229)
-Queen Mary Westfield College ARPA: eliot%cs.qmw.ac.uk@nsf.ac.uk
-Mile End Road UUCP: eliot@qmw-cs.uucp
-LONDON E1 4NS
-
->From vestal@SRC.Honeywell.COM Fri Apr 5 12:26:11 1991
-From: vestal@SRC.Honeywell.COM (Steve Vestal)
-Newsgroups: comp.compilers
-Subject: Re: Portable Fast Direct Threaded Code
-Keywords: interpreter, performance, design
-Date: 3 Apr 91 18:23:34 GMT
-Reply-To: vestal@SRC.Honeywell.COM (Steve Vestal)
-Organization: Honeywell Systems & Research Center
-In-Reply-To: pardo@cs.washington.edu's message of 2 Apr 91 19:21:25 GMT
-
-In article <1991Apr2.192125.7464@beaver.cs.washington.edu>
-pardo@cs.washington.edu (David Keppel) writes:
-[references about threaded code, much stuff deleted]
-
-David> %X Describes how to build a threaded code interpeter/compiler from
-David> scratch.
-David> * Less than 2:1 performance hit of threading compared to full
-David> compilation.
-
-I have a question about this. Numbers like this are often cited for
-threaded-type code, but in Bell's paper this was for the PDP-11 (whose
-addressing modes made it a natural for threaded code). Paul Klint's
-"Interpretation Techniques" paper (Software P&E, v11, 1981) cites a
-significant difference for interpreter fetch/decode times on different
-architectures. He cited numbers around 2:1 for the PDP-11, but something
-more like 9:1 for a Cyber. I did a Q&D evaluation of this for a RISC, and
-the ratio I guesstimated was closer to that Klint gave for the Cyber than
-for the PDP-11 (not unexpectedly).
-
-How architecturally dependent is the performance of these techniques
-(relative to compiling to native code)?
-
-Steve Vestal
-Mail: Honeywell S&RC MN65-2100, 3660 Technology Drive, Minneapolis MN 55418
-Phone: (612) 782-7049 Internet: vestal@src.honeywell.com
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From E.Ireland@massey.ac.nz Fri Apr 5 12:29:20 1991
-From: E.Ireland@massey.ac.nz (Evan Ireland)
-Newsgroups: comp.lang.functional
-Subject: Three address code
-Date: 4 Apr 91 21:49:21 GMT
-Reply-To: E.Ireland@massey.ac.nz
-Organization: Information Sciences, Massey University, New Zealand
-
-I've had no luck with mail, so this is for csdw at Rhodes University.
-
->
->In an attempt to optimize a functional language, I would like to
->turn the stack based intermediate code into three address code.
->
->Has anyone done similar conversions? Any references would be
->greatly appreciated.
-
-I do not have any references, but I thought that one aspect of my FAM
-implementation might be of interest.
-
-A number of interpreters and compilers that I have seen implement a stack
-pointer in a register or global variable. Then to implement various stack
-operations, they use auto-increment or auto-decrement operations on the stack
-pointer register. Since I generate portable C, and thus cannot assume I have a
-global register for the stack pointer, each generated function is declared as
-
- DATA *f (register DATA *fp)
- {
- ....
- }
-
-Thus I pass to each function the current pointer to top of stack, from which it
-can index downwards to find its arguments. Within the function, I use indexing
-operations on fp, e.g. fp[3] = fp[1], to manipulate values on the stack, so I
-am not continually manipulating the stack pointer. If "f" calls another
-function, it will pass the address of the current top of stack, e.g. g (&fp[5]).
-
-The advantage to me is that I have a register for a stack pointer even though I
-am generating portable C code.
-
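-A minimal sketch of that convention (purely illustrative, all names assumed):
-
-	typedef long DATA;
-
-	extern DATA *g(register DATA *fp);	/* another generated function */
-
-	DATA *f(register DATA *fp)
-	{
-	    fp[3] = fp[1];		/* shuffle stack slots by indexing fp */
-	    fp[4] = fp[1] + fp[2];	/* no stack-pointer bumping needed    */
-	    return g(&fp[5]);		/* pass the new top of stack onwards  */
-	}
-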
-Now the relationship to three-address code. If you adopt such a scheme, and
-your three address instructions allow some indexing, you can sometimes generate
-
-	ADD fp[3],fp[4],fp[3]
-
-I hope this helps.
-_______________________________________________________________________________
-
-E.Ireland@massey.ac.nz Evan Ireland, School of Information Sciences,
- +64 63 69099 x8541 Massey University, Palmerston North, New Zealand.
-
->From pardo@cs.washington.edu Sat Apr 6 14:32:24 1991
-From: pardo@cs.washington.edu (David Keppel)
-Newsgroups: comp.compilers
-Subject: Re: Portable Fast Direct Threaded Code
-Keywords: interpreter, performance, design
-Date: 4 Apr 91 17:10:55 GMT
-Reply-To: pardo@cs.washington.edu (David Keppel)
-Organization: Computer Science & Engineering, U. of Washington, Seattle
-
->>>[Threaded code vs. compilation]
-
->pardo@cs.washington.edu (David Keppel) writes:
->>[Less than 2:1 performance hit of threading vs. full compilation.]
-
-Note also that the reference that claimed 2:1 (Peter M. Kogge, IEEE
-Computer pp 22-33 March 1982) also attributed part of that ratio to the
-poor state of compiler optimization.
-
-
-vestal@SRC.Honeywell.COM (Steve Vestal) writes:
->[Klint says 2:1 for PDP-11 v. 9:1 for Cyber.
-> How architecturally dependent are these techniques?]
-
-Suppose that the statically-compiled code fragments that are threaded
-together are called `primitives'.
-
-
-When the execution time of a primitive is large, then the overhead for the
-interpreter can be large and still have a small effect on performance.
-The performance of the interpreted code is dominated by the time in a
-primitive vs. the overhead of moving between primitives.
-
-When the execution time of the primitives is small, then the overhead for
-moving between primitives can be a large fraction of the total execution
-time. Overhead comes from at least two sources:
-
-    * Control flow: the address of the next primitive is loaded
- from data memory and the processor executes an indirect jump.
-
- * Register allocation: a primitive is essentially a function with
-      a fast calling convention (no stack adjustment). Thus, it inherits all
-      the traditional problems of interprocedural register allocation.
-
-Examples of `large primitives' are ``draw circle'' and ``interpolate
-spline''. Examples of small primitives are ``push'', ``add'', etc.
-
-
-* Architectural dependency of control flow
-
-Examples:
-
- Direct jumps in full compilation:
-
- op1
- op2
- br next // direct jump
-
- Indirect jumps for threading for a CISC:
-
- op1
- op2
- br *(r0)+ // load, increment, jump
-
- Indirect jumps for threading for a RISC:
-
- ld *r0, r1 // scheduled load
- op1
- op2
- br *r1 // jump
- add r1, #4, r1 // delay slot increment
-
-Direct jumps in full compilation can frequently use one instruction (a
-``near branch'') both to find the address of the next code fragment and
-perform the control transfer. On a CISC, branches are typically two or
-three bytes. On a RISC, branches are typically four bytes. The threaded
-indirect (load, increment, jump) is typically three bytes on a CISC and
-twelve bytes (three instructions) on a RISC.
-
-Direct jumps in full compilation take typically one instruction time.
-Indirect jumps take at least the following operations: load, increment,
-jump indirect. On a CISC, the three operations can typically be `folded'
-in to one instruction. There may be a load penalty of one instruction
-time but the increment is overlapped, so the total time is three machine
-units (one `unit' is about one register->register operation). On a RISC,
-the total penalty is three machine units.
-
-Direct jumps take one (I-cache) cycle to fetch both the branch instruction
-and the address of the branch target. Indirect jumps take a D-cache cycle
-to fetch the address of the branch target and an I-cache cycle to fetch
-the branch instruction.
-
-Direct jumps can take advantage of instruction prefetching since the
-address of the next instruction is known at the time that the instruction
-prefetch reads the direct jump. Threaded indirects require an indirect
-branch off of a register. Current RISC and CISC machines are about
-equivalent in that they do little prefetching. Some machines being
-designed do more prefetching so the threading overhead for them will be
-greater.
-
-
-* Architectural dependency of register allocation
-
-In a machine with a small number of registers, many of the registers are
-in-use in each primitive and the best possible register allocation will
-contain many loads and stores. In a machine with a large number of
-registers, the full-compilation implementation can make much better use of
-registers than the threaded primitives implementation (again, for small
-primitives). The savings from full compilation are exactly analogous to
-the improvements in register allocation from doing inlining of small
-procedures.
-
-
-* Other points to ponder
-
-In some cases the threaded code implementation is substantially smaller
-than the full-compilation implementation. For large functions or a
-machine with small caches, the loss of performance from threading might be
-overwhelmed by the gain in cache performance.
-
-On RISC machines, procedure call/return is about twice the cost of other
-control flow, except for the overhead of register management. Thus,
-call-dense RISC code from full compilation may behave about the same as
-threaded code.
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From airs!ian@uunet.UU.NET Sat Apr 6 14:32:56 1991
-From: airs!ian@uunet.UU.NET (Ian Lance Taylor)
-Newsgroups: comp.compilers
-Subject: Threaded code
-Keywords: books, interpreter, design
-Date: 4 Apr 91 07:19:41 GMT
-Reply-To: airs!ian@uunet.UU.NET (Ian Lance Taylor)
-Organization: Compilers Central
-
-The book ``Threaded Interpretive Languages'' has a quite complete
-implementation of a threaded version of Forth in Z80 assembler. It's
-a very complete description of why threaded implementations exist and
-how to implement them easily. It's by R. G. Loeliger and was
-published by Byte Books (ISBN 0-07-038360-X). It was published in
-1981, though, and I'm sure it's long out of print.
-
-Ian Taylor airs!ian@uunet.uu.net uunet!airs!ian
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From firth@sei.cmu.edu Sun Apr 7 14:33:13 1991
-From: firth@sei.cmu.edu (Robert Firth)
-Newsgroups: comp.compilers
-Subject: Re: Portable Fast Direct Threaded Code
-Keywords: interpreter, performance, design
-Date: 4 Apr 91 13:27:21 GMT
-Reply-To: firth@sei.cmu.edu (Robert Firth)
-Organization: Software Engineering Institute, Pittsburgh, PA
-
-In article <1991Apr3.182334.16164@src.honeywell.com>
-vestal@SRC.Honeywell.COM (Steve Vestal) writes:
-
->How architecturally dependent is the performance of these techniques
->(relative to compiling to native code)?
-
-[cost of threaded code on PDP-11, RISC &c]
-
-We might have a misunderstanding here, because what I think of as threaded
-code doesn't have a decoding and interpretation step. But I'll talk of
-what I know.
-
-A program in threaded code is just an array of addresses, possibly
-interspersed with operands. So the fragment
-
- c := a + b
-
-becomes something like
-
- address of 'load'
- address of 'a'
- address of 'load'
- address of 'b'
- address of '+'
- address of 'store'
- address of 'c'
-
-This implies a very simple virtual stack machine - you can get more clever
-by implementing a virtual register machine.
-
-The basic execution thread then does this. We point a global register at
-the table of addresses, and each primitive has the form
-
- treg := treg + address'size
- ...
- jump (treg)
-
-As you can see, this is great on the PDP-11, since that reduces to one
-instruction
-
- MOV (treg)+,PC ; NOTE TO MAINTAINER: FASTER THAN JMP - DON'T TOUCH!
-
-On a typical RISC machine, it's two cycles, since you can put almost
-anything in the delay slot(s) after the jump.
-
-Now, the load instruction, for instance, would say
-
-load: treg := treg + address'size
- load (treg) into tempreg
- treg := treg + address'size
- push (tempreg) onto simulated stack
- jump (treg)
-
-On the PDP-11, that's
-
- MOV @(treg)+, -(SP)
- MOV (treg)+, PC
-
-On a RISC, it's much more like
-
- L R0, 4(treg) ; get operand address
- L R0, 0(R0) ; dereference to get operand
- SUBI SP, #4 ; decrement simulated SP
- S R0, 0(SP) ; push operand on stack
- ADDI treg, #8 ; step over two addresses (mine & operands)
- JR (treg) ; over to you, Moriarty!
-
-[to fill delay slots, shuffle the above to 132564]
-
-Well, if you have one load delay slot and one branch delay slot, you can
-fill all three of them, so that's 6 cycles. Given that a typical load is
-only 1.1 cycles in direct code (90% of the delays filled), this is
-certainly a lot more than a 2:1 overhead! When you add the cost of a
-simulated stack (lots of needless loads and stores), I can well believe
-10:1 time expansion for simple code.
-
-In fact, it was more than that on the PDP-11, if you compared threaded
-code with direct code from a decent compiler. The big win in the Fortran
-compiler came from (a) very compact threaded code, and (b) the floating
-point operations were implemented in software, so the overhead of threaded
-code was swamped by the cost of floating addition, subtraction &c.
-
-Here's the full code of the above example, so you can see for yourself
-
-Direct:
- MOV a, R0
- ADD b, R0
- MOV R0, c
-
-Threaded:
- MOV @(treg)+, -(SP)
- MOV (treg)+, PC
-* MOV @(treg)+, -(SP)
-* MOV (treg)+, PC
-* ADD (SP)+,(SP)
- MOV (treg)+, PC
- MOV (SP)+, @(treg)+
- MOV (treg)+, PC
-
-Note that, if you implement a one-address add, you save two instructions,
-since the *** bit reduces to
-
- ADD @(treg)+, (SP)
-
-But even then, it's coming out at over 4:1.
-
-What architectural features make threaded code more efficient? The
-fundamental one is main memory that is fast (or not too slow) relative to
-registers, since you're doing a lot more fetching. Another is a set of
-address modes with double indirection, since you're accessing most
-operands one level of indirection further back. And good old
-autoincrement helps a little, too. Alas, none of that says 'risc', and
-much of it says '1960s'.
-
-Incidentally, if I were to do this again today, I'd definitely simulate a
-general-register machine and use a subset of the real machine's registers.
-If you take 8 of them, you then have 8 loads and stores, one for each
-register, but if you make an absolute rule that nobody even thinks about
-touching one of those 8 that doesn't belong to him, then all the good
-tricks about register allocation, slaving &c will still work. If you then
-implement the operations as one-address general-register, you have again 8
-versions (add into R0, add into R1, ...) and lo! you're programming a very
-familiar old friend.
-
-"But that was in another country, and besides, the machine is dead..."
-
-Robert Firth
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From bpendlet@bambam.es.com Tue Apr 9 20:35:22 1991
-From: bpendlet@bambam.es.com (Bob Pendleton)
-Newsgroups: comp.compilers
-Subject: Re: Portable Fast Direct Threaded Code
-Keywords: interpreter, design
-Date: 8 Apr 91 19:48:00 GMT
-Reply-To: bpendlet@bambam.es.com (Bob Pendleton)
-Organization: Compilers Central
-
-In article <23613@as0c.sei.cmu.edu> you write:
-
-> A program in threaded code is just an array of addresses, possibly
-> interspersed with operands. So the fragment
->
-> c := a + b
->
-> becomes something like
->
-> address of 'load'
-> address of 'a'
-> address of 'load'
-> address of 'b'
-> address of '+'
-> address of 'store'
-> address of 'c'
->
-> This implies a very simple virtual stack machine - you can get more clever
-> by implementing a virtual register machine.
-
-About 10 years ago I was working on a lisp compiler that compiled to
-threaded code. I was trying to get small code and still have some
-performance. (Since I wanted to run the code on a Z80 or 8080, small was
-important. My, how things change :-)
-
-I found that the 3 most common operations in threaded code were load,
-store, and execute. So I put those operations with the operands. This
-made the operands look rather like classes with load, store, and
-execute as virtual functions. If you let the evaluate operation
-subsume the load and execute operations the threaded code for
-
- c := a + b;
-
-becomes
-
- address of 'a.evaluate()'
- address of 'b.evaluate()'
- address of '+'
- address of 'c.store()'
-
-and
-
- g := F(x, y);
-
-becomes
-
- address of 'x.evaluate()'
- address of 'y.evaluate()'
- address of 'F.evaluate()'
- address of 'g.store()'
-
-
-Which is much smaller than the original version of threaded code.
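-
-Here is a small sketch of that idea in C, for concreteness. It is not
-Pendleton's code: the names are invented and a plain dispatch loop
-stands in for true threading, but each operand cell carries its own
-evaluate/store routine, as described above.
-
-#include <stdio.h>
-
-typedef struct Cell Cell;
-typedef void (*Op)(Cell *);
-
-struct Cell {
-    Op    run;      /* evaluate, store, or an operator */
-    long *where;    /* the variable this cell refers to, if any */
-};
-
-static long stack[32];
-static long *sp = stack;                   /* simulated evaluation stack */
-
-static void evaluate(Cell *cell) { *sp++ = *cell->where; }  /* push variable */
-static void store(Cell *cell)    { *cell->where = *--sp; }  /* pop into variable */
-static void plus(Cell *cell)     { (void)cell; --sp; sp[-1] += sp[0]; }
-
-static long a = 2, b = 3, c;
-
-/* the threaded code for  c := a + b  is just a list of cell addresses */
-static Cell a_ev = { evaluate, &a }, b_ev = { evaluate, &b };
-static Cell add  = { plus, 0 },      c_st = { store, &c };
-static Cell *code[] = { &a_ev, &b_ev, &add, &c_st, 0 };
-
-int main(void)
-{
-    Cell **ip;
-    for (ip = code; *ip; ip++)             /* dispatch loop */
-        (*ip)->run(*ip);
-    printf("c = %ld\n", c);                /* prints c = 5 */
-    return 0;
-}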
-
-Later, while working on a homebrew version of FORTH I gave up on
-threaded code completely. I found, like most who have explored it,
-that symbolic execution of RPN code is a nice way to generate machine
-code. The machine code runs much faster than threaded code and, even
-on an 8080, was only about 25% bigger than the threaded code.
---
- Bob Pendleton
- bpendlet@dsd.es.com or decwrl!esunix!bpendlet or utah-cs!esunix!bpendlet
-[The DEC PDP-11 Fortran compiler did something similar, writing load routines
-for commonly used variables. -John]
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From pardo@june.cs.washington.edu Wed Apr 24 09:26:32 1991
-From: pardo@june.cs.washington.edu (David Keppel)
-Newsgroups: comp.compilers
-Subject: Re: Fast Interpreted Code
-Keywords: interpreter, threaded code
-Date: 23 Apr 91 02:06:21 GMT
-Reply-To: pardo@june.cs.washington.edu (David Keppel)
-Organization: Computer Science & Engineering, U. of Washington, Seattle
-
-ssinghani@viewlogic.com (Sunder Singhani) writes:
->[Our threaded code isn't fast enough. What's faster?]
-
-As far as I know, threaded code gives the fastest primitives/second
-dispatch rate on a variety of architectures. The general techniques for
-making things faster (that I know of!) are to change things so that the
-dispatch rate can go down without changing the work that gets done (or use
-hardware, but we'll ignore that for the moment.)
-
-* Use a different v-machine instruction set
-
- The overhead of interpreting is almost nothing in generic PostScript
- imaging code because all the time is spent in non-interpreted
- primitives. If you can characterize your key operations (perhaps
- info in [Davidson & Fraser ??, Fraser, Myers & Wendt 84] can help
- you analyze for common operations instead of the more normal time in
- routines) then you can re-code your virtual instruction set to have
- as primitives the operations that are performed frequently.
-
-* Dynamic compilation to native machine code
-
- This is what is done in ParcPlace Systems' Smalltalk-80
- implementation [Deutsch & Schiffman 84], and also in Insignia
- Solutions' 8086 interpreter.
-
- Dynamic compilation suffers from the need to do compilation at
- runtime: a compiler that produces better code will take longer to
- run and the compile time contributes to the overall program runtime.
- Also, program text isn't shared, even if multiple instances are
- running simultaneously.
-
-* Native-coding key routines
-
- If you believe that your program spends 80% of its time in a few key
- routines, then compiling just these routines -- statically, adding
- them to the primitive set, statically adding them as library
- routines, or dynamically -- can improve performance substantially
- [Pittman 87].
-
-
-5 Citations follow:
-
-%A Robert Bedichek
-%T Some Efficient Architecture Simulation Techniques
-%J Winter '90 USENIX Conference
-%D 26 October, 1989
-%W Robert Bedichek.
-%W Pardo has a copy.
-%X Describes a simulator that uses threaded-code techniques to emulate
-a Motorola 88000. Each 88k instruction is executed in about 20 host
-(68020) instructions. Discusses techniques used to get the simulation
-down from several thousand host instructions in many other
-simulators.
-
-%A Jack W. Davidson
-%A Chris W. Fraser
-%T Eliminating Redundant Object Code
-%J POPL9
-%P 128-132
-
-%A Peter Deutsch
-%A Alan M. Schiffman
-%T Efficient Implementation of the Smalltalk-80 System
-%J 11th Annual Symposium on Principles of Programming Languages
-(POPL 11)
-%D January 1984
-%P 297-302
-%X Dynamic translation of p-code to n-code (native code).
-Reasons for not using straight p-code or straight n-code:
- * p-code is smaller than n-code (<= 5X).
- * The debugger can debug p-code, improving portability.
- * Native code is faster (<= 2X). Reasons include
-special fetch/decode/dispatch hardware;
-p-machine and n-machine may be very different, e.g.,
-stack machine vs. register-oriented.
- * Threaded code does reduce the cost of p-code fetch/decode.
-Does not help with operand decoding.
-Does not allow optimizations to span more than one instruction.
-[pardo: that's not technically true, but each optimized
-instruction must exist in addition to the unoptimized version.
-That leads to exponential blowup of the p-code. Example: delayed
-branch and non-delayed branch versions of Robert Bedichek's 88k
-simulator.]
- The system characteristics:
- * The time to translate to n-code via macro expansion is about the
-same as the execute time to interpret the p-code.
- * (pg 300:) Self-modifying code (SMC) is deprecated but used in a
-``well-confined'' way. Could indirect at more cost. Use SMC on the
-machines where it works, indirection where SMC doesn't.
- * Performance is compared to a ``straightforward'' interpreter.
-What's that?
-
-%A Christopher W. Fraser
-%A Eugene W. Myers
-%A Alan L. Wendt
-%T Analyzing and Compressing Assembly Code
-%J CCC84
-%P 117-121
-
-%A Thomas Pittman
-%T Two-Level Hybrid Interpreter/Native Code Execution for Combined
-Space-Time Program Efficiency
-%D 1987
-%J ACM SIGPLAN
-%P 150-152
-%X Talks about native code execution vs. various kinds of interpreting
-and encoding of key routines in assembly.
-
-
-Hope this helps!
-
- ;-D on ( This is all open to interpretation ) Pardo
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From eliot@cs.qmw.ac.uk Tue Apr 30 15:55:17 1991
-From: eliot@cs.qmw.ac.uk (Eliot Miranda)
-Newsgroups: comp.compilers,gnu.gcc.bug,alt.sources
-Subject: re: Threaded Code
-Keywords: design, interpreter
-Date: 5 Apr 91 11:43:51 GMT
-Reply-To: Eliot Miranda <eliot@cs.qmw.ac.uk>
-Followup-To: comp.compilers
-Organization: Computer Science Dept, QMW, University of London, UK.
-
-I recently posted articles to comp.compilers & alt.sources on how
-to write threaded code machines in C. I received the following questions
-from Simon Peyton Jones at Glasgow. They are specific to GCC.
-Since they have non-obvious answers & since the answers suggest
-augmentation of the GCC compiler I'm posting my response to Simon.
-
->From: Simon L Peyton Jones <simonpj@cs.gla.ac.uk>
->
->I saw your article about threaded code. Like you and others, we are
->using C as an assembler, only for a pure functional language, Haskell.
->I have some brief questions.
->
->1. By telling GCC not to use a frame pointer, one can eliminate
->the prolog entirely, can't one? So why edit it out?
-
-No, registers local to the procedure will still be saved & stack space
-allocated for automatic variables. This IS a problem since the
-threaded-code jump at the end of the procedure will miss the register
-restores before the epilogue. Consequently the stack will grow unless
-these register saves & stack-space allocations are removed. Also
-GCC can not always eliminate the frame pointer.
-
->I guess the answer is going to be local variables, allocated once for
->all by the StartTCMachine routine. Still, it seems quite a pain. I guess
->one could sacrifice some (perhaps slight) speed by using a bunch of
->globals instead.
-For certain routines, not using register variables will be expensive
-(e.g. a simple integer arithmetic primitive).
-
->2. You edit out the epilogue for tidiness only, I take it. It doesn't
->cause any problems if you leave it in, does it?
-No, but given that the prologue has to be removed & removing the epilogue
-is as easy (& given finite memory :-) one might as well remove it.
->
->3. Why do you use no-defer-pop (with the associated bug)?
-OK. This is again to avoid stack growth. On conventional stack architectures
-gcc will try & combine the argument popping code of a sequence of
-procedure calls.
-e.g.
-extern long a, b, c;
-void doit() {
- foo(a); bar(b); baz(c);
-}
-
-with -O -no-defer-pop one might expect gcc to generate
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
- addqw #4,%sp
- movel c,%sp@-
- jbsr baz
- addqw #4,%sp
- unlk %a6
- rts
-
-but because gcc knows that the unlk instruction will roll back the stack
-in fact gcc generates:
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
- addqw #4,%sp
- movel c,%sp@-
- jbsr baz
- unlk %a6
- rts
-
-With -O -fdefer-pop gcc optimizes out the pops completely & generates:
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
- movel c,%sp@-
- jbsr baz
- unlk %a6
- rts
-
-with -O -fomit-frame-pointer -fdefer-pop gcc generates:
-
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
- movel c,%sp@-
- jbsr baz
- addw #12,%sp
- rts
-
-& with -O -fomit-frame-pointer -fno-defer-pop gcc generates:
-
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
- addqw #4,%sp
- movel c,%sp@-
- jbsr baz
- addqw #4,%sp
- rts
-
-All the above cases are as one would wish. The elimination of all
-deferred pops in the unlk instruction is especially clever.
-
-However, in the presence of the threaded-code jump the waters darken!
-Consider what gcc generates for:
-
- register void (**tcip)() asm("%a5");
-
- #define JUMPNEXT do{asm("movl %a5@+,%a0; jmp %a0@");return;}while(0)
-
- extern long a, b;
- void doit() {
- foo(a); bar(b); JUMPNEXT;
- }
-with -O -fdefer-pop gcc generates
-
-doit:
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- unlk %a6
- rts
-
-This is clearly no good because the arguments to both foo & bar
-will never be popped. Every time doit() is executed the stack will grow
-by 8 bytes. Soon your program will dump core with a very large stack
-segment!
-
-with -O -fno-defer-pop gcc generates:
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- unlk %a6
- rts
-
-Again useless because bar's pop has been folded into the unlk
-which won't be executed.
-
-with -O -fdefer-pop -fomit-frame-pointer gcc generates
-
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
- addqw #8,%sp
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- rts
-
-This is great. However, not all functions are susceptible to
-the omit-frame-pointer optimization (i.e. functions
-with local variables). E.g. the code generated for:
-
- register void (**tcip)() asm("%a5");
-
- #define JUMPNEXT do{asm("movl %a5@+,%a0; jmp %a0@");return;}while(0)
-
- extern long a, b;
- void doit() {
- char large[1024];
- foo(a,large); bar(b); JUMPNEXT;
- }
-
-with -O -fomit-frame-pointer -fdefer-pop is:
-
- link %a6,#-1024
- pea %a6@(-1024)
- movel a,%sp@-
- jbsr foo
- movel b,%sp@-
- jbsr bar
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- unlk %a6
- rts
-
-so in general one can't rely on -fomit-frame-pointer.
-For the above example both
- -O -fomit-frame-pointer -fno-defer-pop
-and
- -O -fno-defer-pop
-generate:
-
-doit:
- link %a6,#-1024
- pea %a6@(-1024)
- movel a,%sp@-
- jbsr foo
- addqw #8,%sp
- movel b,%sp@-
- jbsr bar
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- unlk %a6
- rts
-
-This is also useless because bar's argument pop has been folded away. The
-problem is that gcc will always fold away the last call's argument pop if
-the function has a frame pointer, and -fomit-frame-pointer can't always
-get rid of the frame pointer. In fact, in the presence of variable-sized
-automatic variables or calls on alloca it would be very hard (impossible
-for recursive functions?) to do.
-
-The neatest solution I've come up with is to use -fno-defer-pop and a
-dummy function call between the threaded-code jump and the return:
-
- register void (**tcip)() asm("%a5");
-
- #define JUMPNEXT do{asm("movl %a5@+,%a0; jmp
-%a0@");SBD();return;}while(0)
-
- extern long a, b;
- void doit() {
- foo(a); bar(b); JUMPNEXT;
- }
-with -O -fno-defer-pop gcc generates:
-
- link %a6,#0
- movel a,%sp@-
- jbsr foo
- addqw #4,%sp
- movel b,%sp@-
- jbsr bar
- addqw #4,%sp
-#APP
- movl %a5@+,%a0; jmp %a0@
-#NO_APP
- jbsr SBD
- unlk %a6
- rts
-
-Now bar's argument pop is not folded because it's no longer the last
-call in the routine; SBD is.
-So the call to SBD
- a) prevents gcc's 'last call argument pop fold into unlk' optimization,
-    which in turn prevents uncontrolled stack growth.
- b) doesn't get executed because of the jump
- c) is trivial to remove from the assembler with a sed-script
-
-
-One can try to use -fcaller-saves, but this surrounds calls with unnecessary
-register saves & restores that have to be edited out for the code to be
-optimal.
-
->4. Why does JUMPNEXT have a loop? Surely the jump leaves the loop right
->away. Presumably you are tricking the compiler somehow.
-
-This is old C lore. The problem is
- 'How do you write a macro that is a sequence of statements
- that can be used wherever a single statement can?'
-
-take the following definition of JUMPNEXT:
-#define JUMPNEXT asm("movl %a5@+,%a0; jmp %a0@");return;
-
-Now invoke it here:
- if (its_time_to_jump)
- JUMPNEXT;
- do_something_else();
-
-This expands to:
- if (its_time_to_jump)
- asm("movl %a5@+,%a0; jmp %a0@");
- return;
- do_something_else();
-
-Not at all what's intended!
-
-There are two tricks I know of (the first I saw in Berkeley Smalltalk,
-the second in Richard Stallman's gcc manual. I expect they're both
-quite old).
-The first is to surround your statements with
-if (TRUE){statements}else
-i.e.
-#define JUMPNEXT if(1){asm("movl %a5@+,%a0; jmp %a0@");return;}else
-So now we get:
-    if (its_time_to_jump)
-        if (1){
-            asm("movl %a5@+,%a0; jmp %a0@");
-            return;
-        }else;
-    do_something_else();
-
-which works because C binds elses innermost first. However, some
-compilers will whine about dangling elses. The second scheme is
-more elegant (-:
-
-Surround your statements with
-do{statements}while(FALSE);
-which will execute statements precisely once (it's NOT a loop).
-i.e.
-#define JUMPNEXT do{asm("movl %a5@+,%a0; jmp %a0@");SBD();return;}while(0)
-expands to
-
-    if (its_time_to_jump)
-        do {
-            asm("movl %a5@+,%a0; jmp %a0@");
-            SBD();
-            return;
-        } while(0);
-    do_something_else();
-
-which does what's wanted and doesn't incur compiler whines.
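-
-For completeness, here is a tiny self-contained illustration of the
-do/while(0) trick; the asm is replaced by a printf so it compiles
-anywhere, and the names are made up.
-
-#include <stdio.h>
-
-/* a two-statement macro wrapped so that it behaves as one statement */
-#define JUMPNEXT do { printf("jump!\n"); return; } while (0)
-
-static void step(int its_time_to_jump)
-{
-    if (its_time_to_jump)
-        JUMPNEXT;             /* expands to a single statement, so the  */
-    printf("carry on\n");     /* following line is NOT part of the if   */
-}
-
-int main(void)
-{
-    step(0);                  /* prints "carry on" */
-    step(1);                  /* prints "jump!"    */
-    return 0;
-}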
-
-
->Thanks
->
->Simon L Peyton Jones, Glasgow University
-
-More and more people are taking the 'use C as an assembler' route, and
-more and more people are using GCC to do it (because its code quality is
-good, it has global register variables, and it has an excellent asm
-facility). The threaded-code-in-C idea is also becoming more popular.
-But as the code above demonstrates, one does have to side-step
-optimizations and develop system-specific assembler editing scripts.
-
-I'd like to ask Richard Stallman & the GCC development team for
- -fno-prolog -fno-epilog
-flags that instruct gcc to generate
- a) no register saves or restores
- b) no automatic variable allocation
- c) no procedure linkage/frame creation
-
-Then the optimal 'Threaded-Code Machine in GCC C' can be compiled without
-any assembler editing scripts at all.
-
-Also nice would be a way of telling GCC that an asm statement
-changed the flow of control so GCC could
- a) warn about not-reached code
- b) eliminate unnecessary code (do more code folding)
---
-Eliot Miranda email: eliot@cs.qmw.ac.uk
-Dept of Computer Science Tel: 071 975 5229 (+44 71 975 5229)
-Queen Mary Westfield College ARPA: eliot%cs.qmw.ac.uk@nsf.ac.uk
-Mile End Road UUCP: eliot@qmw-cs.uucp
-LONDON E1 4NS
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
->From brennan@bcsaic.boeing.com Sat May 4 11:28:41 1991
-From: brennan@bcsaic.boeing.com (Michael D Brennan)
-Newsgroups: comp.compilers
-Subject: re: Threaded code
-Keywords: interpreter, design
-Date: 2 May 91 19:50:23 GMT
-Reply-To: brennan@bcsaic.boeing.com (Michael D Brennan)
-Organization: Boeing Aerospace & Electronics, Seattle WA
-
-Another method for obtaining threaded byte code for an interpreter
-is to edit the assembler output of a big switch
-rather than editing the prologue and epilogue off function calls.
-
-You don't need gcc or global vars in registers, it works with smart and
-dumb compilers, and all optimization can be turned on.
-
-For example:
-
-This C routine executes (unthreaded) byte code for an interpreter
-that can add, subtract and print.
-
-#define HALT 0
-#define PUSH 1
-#define ADD 2
-#define SUB 3
-#define PRINT 4
-
-static int stack[32] ;
-
-void execute(code_ptr)
- register int *code_ptr ;
-{
- register int *stack_ptr = stack - 1 ;
-
-
- while ( 1 )
- {
- switch( *code_ptr++ )
- {
- case HALT : return ;
- case PUSH :
- * ++ stack_ptr = *code_ptr++ ;
- break ;
-
- case ADD :
- stack_ptr-- ;
- *stack_ptr += stack_ptr[1] ;
- break ;
-
- case SUB :
- stack_ptr-- ;
- *stack_ptr -= stack_ptr[1] ;
- break ;
-
- case PRINT :
- printf("%d\n", *stack_ptr--);
- break ;
- }
- }
-}
-
--------------------------------------------------------
-
-to interpret 2 + (3 - 4)
-
-the front end "compiles" in int code[]
-
-PUSH, 2, PUSH, 3, PUSH, 4, SUB, ADD, PRINT, HALT
-
-and calls execute(code).
-
-------------------------------------------------------
-
-The difference between this and the threaded code discussed over
-the last few weeks is that the switch gets compiled as
-
- jmp TABLE[ *code_ptr++ ]
-
-where TABLE is the jump table generated by the compiler which holds
-the addresses of the case labels.
-
-With threading, the transitions between functions become
-
- jmp *code_ptr++
-
-
-but this is easy to get by editing the assembler output to
-export the case label and recode the switch.
-
---------------------------------------------------
-
-For example on a SPARC:
-
-code_ptr is %i0
-stack_ptr is %i5
-
-
- .....
- ! case PUSH
-L77004:
- ld [%i0],%o1
- inc 4,%i5
- inc 4,%i0
- b L77008
- st %o1,[%i5]
-
- .....
-
- ! the switch, doesn't change structure
- ! as you add new op codes
-
-L77008:
- mov %i0,%i4
- ld [%i4],%i4
- inc 4,%i0
- cmp %i4,4
- bgu L77008
- sll %i4,2,%i4
- sethi %hi(L2000000),%o1
- or %o1,%lo(L2000000),%o1 ! [internal]
- ld [%i4+%o1],%o0
- jmp %o0
- nop
-L2000000: ! the jump TABLE
- .word L77003 ! HALT etc
- .word L77004
- .word L77005
- .word L77006
- .word L77007
-
-
--------------------------------------------
-Modify by adding global labels and editing the switch:
-
-
-
- .....
- ! case PUSH
-_push :
-L77004:
- ld [%i0],%o1
- inc 4,%i5
- inc 4,%i0
- b L77008
- st %o1,[%i5]
-
- .....
-
- ! the edited switch
-L77008:
- mov %i0,%i4
- ld [%i4],%i4
- inc 4,%i0
- jmp %i4
- nop
- ! remove TABLE
-
--------------------------------------------
-
-For another example on an Intel 8088
-
-stack_ptr is si
-code_ptr is di
-
- ; while ( 1 )
- ; {
- ; switch( *code_ptr++ )
- ;
-@1@50:
- mov bx,di
- inc di
- inc di
- mov bx,word ptr [bx]
- cmp bx,3
- ja short @1@50
- shl bx,1
- jmp word ptr cs:@1@C738[bx]
-
-
-@1@122:
- ;
- ; case PUSH :
- ; * ++ stack_ptr = *code_ptr++ ;
- ;
- inc si
- inc si
- mov ax,word ptr [di]
- mov word ptr [si],ax
- inc di
- inc di
- ;
- ; break ;
- ;
- jmp short @1@50
- ;
-
- ....
-
-@1@C738 label word ; jump TABLE
- dw @1@194 ; HALT
- dw @1@122 ; PUSH etc
- dw @1@146
-
- ....
-
-------------------------------------------------
-
-After editing, the jump can be computed inline:
-
- ; while ( 1 )
- ; {
- ; switch( *code_ptr++ )
- ;
-@1@50: ; switch code is replaced by code only executed once
-
- inc di
- inc di
- jmp [di-2]
-
- .....
-
-_push :
-@1@122:
- ;
- ; case PUSH :
- ; * ++ stack_ptr = *code_ptr++ ;
- ;
- inc si
- inc si
- mov ax,word ptr [di]
- mov word ptr [si],ax
- inc di
- inc di
- ;
- ; break ;
- ;
- inc di ; jmp to *code_ptr++ inline
- inc di
- jmp [di-2]
- ;
- ....
-
-----------------------------------------------
-
-the "front end" has defines
-
-typedef void (*TCODE)() ;
-
-extern void halt(), push(), add(), sub(), print() ;
-
-TCODE code[CODESIZE] ;
-
-in the array code[], the front end compiles
-
-
-push, 2, push, 3, push, 4, sub, add, print, halt
-
-and calls execute(code).
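-
-For concreteness, here is a self-contained, portable approximation of
-that threaded version. The real scheme never returns to a dispatch
-loop (each primitive jumps straight to *code_ptr++ via the edited
-assembler), and the integer/function-pointer casts below are not
-strictly portable C, but the data layout, a TCODE array mixing routine
-addresses and integer operands, is the one described above.
-
-#include <stdio.h>
-
-typedef void (*TCODE)() ;
-
-static TCODE *ip ;                     /* the simulated code_ptr      */
-static int   stack[32], *sp = stack ;  /* sp points one past the top  */
-static int   running ;
-
-static void halt()  { running = 0 ; }
-static void push()  { *sp++ = (int)(long)*ip++ ; }  /* operand follows opcode */
-static void add()   { sp-- ; sp[-1] += sp[0] ; }
-static void sub()   { sp-- ; sp[-1] -= sp[0] ; }
-static void print() { printf("%d\n", *--sp) ; }
-
-static void execute(TCODE *code)
-{
-    ip = code ;
-    running = 1 ;
-    while (running)          /* stands in for the threaded jmp *code_ptr++ */
-        (*ip++)() ;
-}
-
-int main(void)
-{
-    /* the front end "compiles" 2 + (3 - 4) */
-    TCODE code[] = { push, (TCODE)2, push, (TCODE)3, push, (TCODE)4,
-                     sub, add, print, halt } ;
-    execute(code) ;          /* prints 1 */
-    return 0 ;
-}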
-
-
---
-Mike Brennan
-brennan@bcsaic.boeing.com
---
-Send compilers articles to compilers@iecc.cambridge.ma.us or
-{ima | spdcc | world}!iecc!compilers. Meta-mail to compilers-request.
-
-
diff --git a/ghc/docs/NOTES.core-overview b/ghc/docs/NOTES.core-overview
deleted file mode 100644
index 8f222994cb..0000000000
--- a/ghc/docs/NOTES.core-overview
+++ /dev/null
@@ -1,94 +0,0 @@
-\documentstyle[11pt,a4wide]{article}
-\begin{document}
-
-%****************************************
-%* *
-%* The Core language *
-%* *
-%****************************************
-
-
-\title{The Core language}
-\author{Simon L Peyton Jones \and
-Will Partain \and
-Patrick Sansom}
-
-\maketitle
-
-\section{Introduction}
-
-This document describes the Glasgow Haskell Core-language data type
-in sufficient detail for an implementor to be able to use it.
-
-\section{Overview}
-
-The Core language is, roughly speaking, the second-order polymorphic
-lambda calculus, augmented with @let@, @letrec@ and @case@.
-It is a Haskell data type (defined shortly), but for convenience in this
-document we give it the concrete syntax given in Figure~\ref{fig:core-syntax}.
-
-Here are some of its important characteristics:
-\begin{description}
-\item[The Core language includes the second-order lambda calculus.]
-That is, type abstraction and type application are provided.
-\item[Constructors and primitive operators are always saturated.]
-This is easily done by adding extra lambdas and performing $\eta$-expansion.
-\item[All pattern-matching is done by simple @case@ expressions.]
-The @case@ expressions are simple in the sense that their patterns
-have only one level.
-\item[Every identifier includes its type.]
-This is not immediately obvious from the syntax, but will be fleshed out
-later. The point is that it is easy to tell the type of any identifier or,
-in general, any Core expression.
-\item[There is no shadowing.]
-Identifiers may not be globally unique,
-but there are no ``holes in the scope'' of any identifier.
-\end{description}
-All these properties should be maintained by programs which manipulate Core-language
-programs.
-
-\section{Identifiers: the type @Id@}
-
-Identifiers have the (abstract) type @Id@.
-\begin{description}
-\item[Equality.]
-Identifiers have a unique number inside them,
-so they can be compared efficiently for equality.
-They are an instance of the class @Eq@.
-\item[Type.]
-The function
-\begin{verbatim}
- getIdUniType :: Id -> UniType
-\end{verbatim}
- gets the type of an identifier.
- \end{description}
-
- \section{Types: the type @UniType@}
-
- \subsection{@TyCon@}
-
- The type @TyCon@ ranges over {\em data} type constructors,
- not over the function type constructor.
-
- A @TyCon@ can be one of:
- \begin{itemize}
- \item A primitive type.
- \item A tuple type.
- \item An algebraic data type (other than tuples).
- \end{itemize}
-
- \section{The Core language data type}
-
- \subsection{@coreExpr@}
-
-Tycon in @case@.
-
-\subsection{@coreBinding@}
-
-\subsection{@coreProgram@}
-
-\subsection{@plainCore@ things}
-
-
-
-\end{document}
diff --git a/ghc/docs/NOTES.desugar b/ghc/docs/NOTES.desugar
deleted file mode 100644
index b9e6ce7a57..0000000000
--- a/ghc/docs/NOTES.desugar
+++ /dev/null
@@ -1,323 +0,0 @@
-(91/08/08: OLD!)
-
-These are notes about a _simple_ but complete pattern-matching
-compiler for Haskell. I presume familiarity with Phil's
-pattern-matching stuff in Simon's book and use roughly the same notation.
-
-Abbreviations: "p" for pattern, "e" (or "E") for expression, "g" for
-guard, "v" for variable, "u" for new variable I made up. "[]" for
-FATBAR.
-
-Subscripts: "p11" is really short for "p_{1,1}". Sometimes I'll use
-a "?", as in "pm1 ... pm?", to mean the second subscript goes up to
-something I'm really not worried about.
-
-NB: LETRECS NOT DEALT WITH YET.
-
----------------------------------------------------------------------
-We need a slightly souped-up "match" for Haskell (vs the Phil-chapter
-one). Simon suggested a re-arrangement of things, which I have then
-further re-arranged...
-
-Proposal (Simon)
-~~~~~~~~
-
-Eliminate default arg of match (3rd arg in Phil-chapter match) in
-favour of returning the variable (not special value) fail. Thus a
-possible translation for
-
- f [] [] = e1
- f x y = e2
-
-would be
-
- f p q = case p of
- [] -> case q of
- [] -> e1
- _ -> fail
- _ -> fail
- where
- fail = e2
-
-Now the issue of whether to duplicate code or share it becomes whether
-to substitute copies of e2 or not. This is a decision we need to take
-anyway for all other let-bound things, so why not for fail too? If
-fail is used only once, we will certainly substitute for it.
-
-We could even detect that fail is used only in a head position, so it
-can be implemented as a stack-adjust then a jump. This might well
-apply to other let-bound things too.
-
-Now here's a proposal for the "match" function. The main difference is
- 1) no default argument
- 2) [contra simon's suggestion] Patterns are still per-row as in
- Phil's chapter.
- 3) [partain] even the input exprs are CoreExprs
-
-OK, for a "match" for m equations each with n patterns:
-
-match :: [Name]
- -- n (variable) names, one per pattern column, bound
- -- to the n expressions we are matching against the
- -- patterns
-
- -> [([Pat], CoreExpr)]
- -- one pair for each of the m equations: the n
- -- patterns in that equation, then the CoreExpr that
- -- is evaluated if we get a match. The CoreExpr may
- -- contain free "fail"s; some hackery required to
- -- ensure that is OK; see below
-
- -> CoreExpr
- -- the resulting code to do the matching
-
-In words,
- takes
- (1) a list of n (match-expression, pattern-column) pairs
- (2) a list of m post-match expressions, expr i to be inserted
- immediately after equation i's lhs matches
- returns
- (1) a desugared expr equivalent of the whole "match"
-
-Meaning
-~~~~~~~
- match [u1, ..., un]
- [([p11, ..., p1n], e1), ..., ([pm1, ..., pmn], em)]
-
- match [ (e1, [p11, ...,pm1]), ..., (en, [p1n, ...,pmn])]
- [ E1, ... Em ]
-
- ********* MEANS *********
-
- case (u1, ..., un) of
- (p11, ..., p1n) -> e1
- _ -> fail
- where
- fail = case (u1, ..., un) of
- (p21, ..., p2n) -> e2
- _ -> fail
- ... and so on ...
-
-Alternatively, this specification could be given in terms of
-pattern-matching lambdas, as in Phil's chapter.
-
-NOT CHANGED BEYOND HERE
-
--------------------------------------------------------------------
-Cranking through a good old function definition with the above:
-
- f p11 p12 ... p1n | g11 = e11
- | g12 = e12
- ...
- | g1? = e1?
- ...
- f pm1 pm2 ... pmn | gm1 = em1
- ...
- | gm? = em?
-
-The "match" equivalent is:
-
-f = \u1.\u2...\un ->
- match [ (u1, [p11, ...,pm1]), ..., (un, [p1n, ...,pmn])]
- [ E1, ..., Em ]
- where fail = error "pattern-match for f failed\n"
- E1 = if g11 then e11 else if g12 then ... else fail
- ...
- Em = if gm1 then em1 else if gm2 then ... else fail
-
-Boring, huh?
-
--------------------------------------------------------------------
-It is helpful to me to think about the simple/base cases for this
-complicated "match".
-
-ALL LISTS EMPTY
-
- match [] []
-
- corresponds to the syntactically bogus (zero equations!?)
-
- case () of
- () -> {- nothing!! -}
- _ -> fail
-
-
-EMPTY RULE -- no more patterns
-
- match [] [ ([], E1), ..., ([], Em) ]
-
- [where, incidentally, each Ei will be of the form
- (not that it has to be...)
-
- Ei = let x1 = e1 in
- let x2 = e2 in
- ...
- let x? = e? in
- if g1 then e'1
- else if g2 then
- ...
- else if g? then e'?
- else fail
- ]
-
- becomes ("E1 [] E2 [] ... [] Em" in Phil's chapter...)
-
- E1
- where
- fail = E2
- where
- ...
- fail = Em-1
- where fail = Em
-
- with any "fail" in Em being bound from an outer scope; perhaps it's
- easier to see written as:
-
- let fail = Em
- in let fail = Em-1
- in ...
- let fail = E2 in E1
--------------------------------------------------------------------
-HANDLING LAZY ("TWIDDLE") PATTERNS
-
-For Haskell, the "mixture rule" (p.~88) looks at a pattern-column and
-splits the equations into groups, depending on whether it sees
-
- * all constructors, or
- * all variables _OR LAZY PATTERNS_
-
-The following example shows what "match" does when confronted by one
-of these variables/lazy-patterns combinations. Note the use of the
-binding lists.
-
- f v | g11 = e11
- ...
- | g1? = e1?
- f ~p | g21 = e21
- ...
- | g2? = e2?
-
-is
-
- f = \ u1 ->
- match [(u1, [ v, ~p ])]
- [ if g11 then e11 else if ... else fail, -- E1
- if g21 then e21 else if ... else fail -- E2
- ]
- where fail = error "no match in f\n"
-
-which transmogrifies into
-
- f = \ u1 ->
- let u2 = u1 in
- match []
- [ -- E1 --
- let v = u2
- in
- if g11 then e11 else if ... else fail
-
- ,-- E2 --
- let free_var1_of_p = match [(u2, [ p ])] [ free_var1_of_p ]
- ...
- free_var?_of_p = match [(u2, [ p ])] [ free_var?_of_p ]
- in
- if g21 then e21 else if ... else fail -- E2
-
- ]
- where fail = error "no match in f\n"
-
-For more specific match-failure error messages, one could insert
-"let fail = ..."'s in strategic places.
-
--------------------------------------------------------------------
-"match" EQUIVALENTS FOR VARIOUS HASKELL CONSTRUCTS
-
-* function definition -- shown above
-
-* pattern-matching lambda (souped up version in static semantics)
-
- \ p1 p2 ... pn | g1 -> e1
- | g2 -> e2
- ...
- | gm -> em
-
- is the same as
-
- \ u1.\u2 ... \un ->
- match [ (u1, [p1]), ..., (un, [pn])]
- [ if g1 then e1 else if ... then em else fail
- ]
- where fail = error "no match in pattern-matching lambda at line 293\n"
-
-* pattern-matching (simple, non-recursive) "let"
-
- let p = e
- in E
-
- corresponds to
-
- case e of
- ~p -> E
-
- which has a "match" equivalent of
-
- match [(e, [~p])] [ E ]
-
- The full-blown Haskell "let" is more horrible:
-
- let p | g1 = e1
- ...
- | gn = en
- in E
-
- corresponds to
-
- case ( if g1 then e1 else... else if gn then en else error "?" ) of
- ~p -> E
-
- thinking about which I am not able to sleep well at night.
- (Won't those g's have things bound from inside p ?)
-
-* pattern-matching (not-quite-so simple, non-recursive) "let"
-
-<mumble>
-
-* pattern binding
-
- p | g1 = e1
- | g2 = e2
- ...
- | gm = em
-
- That's the same as
-
- p = if g1 then e1 else if ... else if gm then em else fail
- where fail = "...some appropriate thing..."
-
- which corresponds to
-
- match [ (if g1 ... then em else fail, [ ~p ]) ]
- [ {-nothing-} ]
- where fail = "...some appropriate thing..."
-
-* "case" expressions (souped up version in static semantics)
-
- case e0 of
- p1 | g11 -> e11
- ...
- | g1? -> e1?
- ...
- pm | gm1 -> em1
- ...
- | gm? -> em?
-
- is the same as
-
- match [ (e0, [p1, ..., pm]) ]
- [ if g11 then e11 else if ... else fail -- E1
- , ... ,
- if gm1 then em1 else if ... else fail
- ]
- where fail = error "pattern-matching case at line xxx failed\n"
-
-* list comprehensions
diff --git a/ghc/docs/NOTES.garbage.collection b/ghc/docs/NOTES.garbage.collection
deleted file mode 100644
index 3260df1aae..0000000000
--- a/ghc/docs/NOTES.garbage.collection
+++ /dev/null
@@ -1,206 +0,0 @@
-
- GARBAGE COLLECTION
- ~~~~~~~~~~~~~~~~~~
-
-The following discussion outlines how the GC is organised and what C code
-the compiler needs to produce to use it.
-
-The files associated with GC are:
-
- StgGC.h header file -- macros and externs
- StgCreate.lc GC init routines
- StgOverflow.lhc Overflow routines -- interface to GC
- GC2s.lhc }
- GC1s.lhc } GC control routines
- GCdm.lhc } for each particular GC
- GCap.lhc }
- GCevac.lc Evacuation code fragments (copying GC)
- GCscav.lhc Scavenging code fragments (copying GC)
- GCcompact.lhc Inplace Compacting GC code fragments
- GCmark.lhc Marking code fragments
-
-Other files:
-
- In gctest/
- gctest.c GC Small detail test bed program
-
- In gcstat/
- Performance evaluation stuff
-
-
-Basic Requirements of the C code Produced by the Haskell Compiler
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are two main aspects of the compiler generated code that
-interact with GC:
-
-1) Compiled haskell code calls the garbage collection routine when the
- heap overflows by entering the appropriate _HEAP_OVERFLOW_... routine.
-
- These routines isolate register usage and calls the GC control
- routine that was defined at compile time.
-
- For a description of the heap overflow conventions see:
-
- ~grasp/ghc/compiler/absCSyn/RTSLabels.lhs
-
-
- The following must be adhered to by the mutator:
-
- REQUIREMENT COLLECTOR
- SpA and SpB point to A and B stacks all
-
- Hp must point to last word allocated dual,comp
- All updated closures must "know" their original dual,comp
- size
-
- HpLim must point to one beyond top of root stack appel
- Updated closures in the old generation must "know" appel
- their original size
-
- The GC Control routines have to know about the pointer stack and
- Update Stack.
-
-2) The info tables that are pointed to by closures must have the
- appropriate GC routines within them. This is achieved by using the
- following C Macros to declare them:
-
- table_name -- the name given to the info table
- entry_code -- the name of the normal evaluation
- entry code required for the closure
- size -- the No of free var words in the closure
- ptrs -- the number of pointers in the closure
-
-
- SPEC_INFO_TABLE(table_name,entry_code,size,ptrs);
-
- Declares an info table with specialised code fragments.
- These are currently available for the following closure
- configurations: size, ptrs
- 1,0 2,0 3,0 4,0 5,0
- 1,1 2,1 3,1
- 2,2
- 3,3
- 4,4
- 5,5
- ...
- 11,11
-
- GEN_INFO_TABLE(table_name,entry_code,size,ptrs);
-
- Declares an info table that uses generic code fragments and
- places data to drive these routines in the info table.
- These are available for all combinations of size,ptrs (even
- those for which SPEC routines are provided).
-
-
- STATIC_INFO_TABLE(table_name,entry_code);
-
- Declares an info table suitable for a static closure.
-
-
- DATA_INFO_TABLE(table_name,entry_code);
-
- Declares an info table suitable for a data closure.
- This closure contains no heap pointers and its size
- (of data and size field) in its first word
-
- See NOTES.arbitary-ints
-
-
- IND_INFO_TABLE(table_name,ind_code);
-
- Declares an info table suitable for an indirection.
- But see below !! (ToDo)
-
-
-Using a Particular Garbage Collection Scheme
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When deciding which collector to use there are two decision points.
-
-At compile time it must be decided which code fragments are going to
-be attached to closures. This will limit the possible choice of GC
-schemes at run time.
-
-To compile the GC code and compiler-produced C Code for a particular
-set of code fragments an appropriate define (-D) directive is given
-to the compiler.
-
-Possible directives are:
-
- Code Fragments GC Control Routines
-
--DGC2s Copying Two Space Collection
-
--DGC1s Marking & Compacting Inplace Compaction
-
--DGCdm Copying, Marking DualMode Collection
- & Compaction (+ TwoSpace and Compaction)
--DGCap Copying, Marking Appels Collector
- & Compaction (+ Compaction)
-
-If none of these are defined the result will be No Collection Scheme.
-Heap will be allocated but the program will die if it is ever filled.
-
-Other Directives:
-
--D_GC_DEBUG Provides detailed GC debugging trace output
- (if showGCTrace set)
-
-Note that the GC code will eventually be set up already compiled for
-the different schemes and all that will be required will be to link
-with the appropriate object files. The compiler produced C will still
-need to be compiled with the appropriate define.
-
-
-Trace and Statistics Info
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are a couple of variables that can be set to provide info about
-GC.
-
-showGCTrace -- Provides detailed trace of GC and closure movement
- TRUE -- Summary about GC invocation and heap location
- & 2 -- Detailed trace of copying AND compacting collection
- & 4 -- More detail about linked location lists during compaction
- & 8 -- Detailed info about marking
-
- The & options are only available if compiled with -D_GC_DEBUG
-
-showGCStats -- Provides summary statistics about GC performance
- (ToDo)
-
-ToDo: These should eventually be able to be set by runtime flags
-
-
-Compiler Extensions Required for Compacting Collection
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are a number of additional requirements required of the STG
-machine and the resulting C code for Inplace Compaction to work.
-
-The most important and awkward arises from the fact that updated nodes
-will be scanned. This requires updated nodes (blackholes, indirections
-or inplace updates) to know how big the original closure was (so the
-location of the next closure can be determined).
-
-Implications (Suggestions -- Still to be done):
-
- Need specialized black holes info pointers which know their size.
-
- Code on the Update Stack needs to know the orig closure size. Either
- record this size or have specialised update code fragments.
-
- Updated closures need to know orig size. Possible solns are:
-
- Create dummy garbage closures at the end to fill the hole.
-
- Store size of closure in free space beyond and have GC
- routines which look here for the size.
-
- Specialised indirections that know their size.
-
- May be able to search beyond the end of the closure for the next
- info pointer. Possibly blanking out the unused portion of the
- closure.
diff --git a/ghc/docs/NOTES.import b/ghc/docs/NOTES.import
deleted file mode 100644
index 30e65c4221..0000000000
--- a/ghc/docs/NOTES.import
+++ /dev/null
@@ -1,90 +0,0 @@
- Notes on imports
- ~~~~~~~~~~~~~~~~
- SLPJ 15 March 91
-
-
-Distinguish three kinds of things in interfaces:
-
- - type, data, class, instance, value decls at top level
-
- - the same but imported. Syntax
- import B renaming C to D where
- data C = ...
-
- - imports, which serve just to attach original names
- import B(X,Y)
-
-
-The third group are syntactically stuck at the beginning; the second two
-can be intermingled.
-
-Pass 1
-~~~~~~
-Process each imported interface, and the implementation being compiled,
-scanning *headers of*
-
- type, data and class decls (incl imported ones in interfaces)
-
-giving the following environments for each
-
- type/data info {(ModStr,TyConStr) -> arity}
- class info {(ModStr,ClassStr)}
-
-These are filtered (*but not renamed*) by the imports specified in the
-impl (ignore dotdot parts and parts in parens), to give a grand
-environment E1 of the same shape. It gives the original names of in-scope
-types and classes.
-
-Pass 2
-~~~~~~
-Process each imported interface and the implementation being compiled:
-
- - scan its imports and use them to filter and rename E1, to give
-
- {TyConStr -> arity}
- {ClassStr}
-
- - scan type, data, class decls, headers of instance decls
- and value type sigs in interfaces
-
-giving for each:
-
- class info (CE) {ClassStr -> (ClassId, [ClassOpStr])}
- inst info (GIE) {(ClassId,TyConId) -> (Context, GlobalId)}
- (info from both class and instance decls)
-
- type/data info (TCE) {TyConStr -> (TyConId, [ConstrStr])}
-
-
- value info (GVE) {ValStr -> GlobalId}
- (info from value sigs, and constructors from data decls)
-
-Filter and rename the environments gotten from each import to make a grand
-environment E2.
-
-Pass 3
-~~~~~~
-Check E2 for class cycles, and type synonym cycles.
-
-Pass 4
-~~~~~~
-Process the value decls in the impl, giving {ValStr -> GlobalId}, and some
-code.
-
-Pass 5
-~~~~~~
-Process the bodies of instance decls, to generate code for methods.
-
-
-
-
-
-
- UNRESOLVED
- ~~~~~~~~~~
-1. Who generates the interface?
-
-2. Where is dependency analysis done?
-
-
-
diff --git a/ghc/docs/NOTES.interface b/ghc/docs/NOTES.interface
deleted file mode 100644
index dfe2d61b83..0000000000
--- a/ghc/docs/NOTES.interface
+++ /dev/null
@@ -1,54 +0,0 @@
-
-What gets done when printing an interface
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Basically, we do three things:
-
-A) Create the import decls. For classes and values, this is easy. We
- filter the CE and GVE for all exported objects that were not declared
- in the module. For types, this is a pain because we may have something
- which is exported and which refers to a type that isn't. For example,
- the interface
- interface C where
- ...
- f :: A -> B
- may export B, but A may be expected to come from somewhere else when
- C is imported. So, we have to go through all envs which have ranges that
- may refer to a type. This means the TCE, CE (the class op types),
- GIE_inst (instance types) and GVE (types in the sigs). AND we have to
- filter out prelude defined types from the resulting list.
-
- Finally, we print the import decls, using the conventions that the renamer
- expects (no explicit constructors/ class ops, etc.)
-
-B) Print the fixity decls for whatever constructors/ functions are exported
-
-C) Print the rest of the decls needed.
- 1) Type decls - contents of TCE with export flags
- 2) Class decls - contents of CE with export flags
- 3) Instance decls - contents of GIE_inst that refer to either
- an exported type or an exported class
- (filter then print)
- 4) Value decls - contents of GVE which are not constructors and
- which have an export flag
-
-Issues
-~~~~~~
-
-Type synonyms - to expand or not? Let's not, and complain if a type sig. is
- used but not defined
-
-Canonical form for interfaces - to get rid of perl post-processing!
-
-Deriving for an abstract data type - shall we worry about this now or later?
-
-Printing issues
-~~~~~~~~~~~~~~~
-
-It's convenient to make all ranges of environments know how to print themselves
-(they do now) and decide whether to do so by looking at the export flag
-in their Name fields. Presumably the constructors of a data type that is
-exported abstractly will decide not to print themselves, so no special code
-is needed.
-
-
diff --git a/ghc/docs/NOTES.mkworld2 b/ghc/docs/NOTES.mkworld2
deleted file mode 100644
index 3969d82aba..0000000000
--- a/ghc/docs/NOTES.mkworld2
+++ /dev/null
@@ -1,48 +0,0 @@
-Include order:
-
-# platform info
-# discrim on "trigger" symbols in plat-TRIGGER.jm
-# then slurp in plat-<platform>.jm
-# *-GEN has defaults [if any]
-
-plat-TRIGGER.jm
-plat-<platform>.jm
-plat-GEN.jm
-
-# site overrides
-
-site-<project>-<setup>.jm
-site-<project>.jm
-site-GEN.jm
-
-# <thing>s just for a <project> and its various <setup>s
-
-<thing>-<project>-<setup>.jm
-<thing>-<project>.jm
-
-# things that many projects are likely to use
-
-<thing>-GEN.jm
-
-# finally, the directory-specific stuff
-
-Jmakefile
-
--------------------------------------------------------------------
-must specify platform explicitly
-setup "std", project "none": nothing included
-
--------------------------------------------------------------------
-<Things> that we have files for:
-
-rules: macros related to the main "make" targets
- excpt suffix, everything to make "make" do something is here
- org by principal make target (all, install, etc.)
-
-suffix: things to do w/ make suffix rules (i.e., implicit rules)
-
-utils: utilities that are used in the build process
- (where they are & default options for them)
- (proj file must say which sysutils it wants)
- (the proj files say whether src with or not ==> INeedXXX)
-install: where things are installed, flags for installing
diff --git a/ghc/docs/NOTES.part-of-book b/ghc/docs/NOTES.part-of-book
deleted file mode 100644
index 551dd94aac..0000000000
--- a/ghc/docs/NOTES.part-of-book
+++ /dev/null
@@ -1,73 +0,0 @@
-E.g., for the typechecker sources of the compiler.
-
-% cd compiler/typechecker/
-
-* make a Jmakefile that is NOT plugged into the overall make-world
- system; it will probably look like this:
-
-------------------------------
-/* this is a standalone Jmakefile; NOT part of ghc "make world" */
-
-LitDocRootTargetWithNamedOutput(root,lit,root-standalone)
-------------------------------
-
-* make a "root file", root.lit, to glue the modules together.
-
- At the beginning you'll have something like:
-
- \begin{onlystandalone}
- \documentstyle[11pt,literate,a4wide]{article}
- \begin{document}
- \title{The Glasgow \Haskell{} typechecker}
- \author{The GRASP team}
- \date{October 1991}
- \maketitle
- \tableofcontents
- \end{onlystandalone}
-
- \begin{onlypartofdoc}
- \section[Typechecker]{The typechecker}
- \downsection
- \end{onlypartofdoc}
-
- At the end of the file, you'll need something like:
-
- \begin{onlypartofdoc}
- \upsection
- \end{onlypartofdoc}
-
- \begin{onlystandalone}
- \printindex
- \end{document}
- \end{onlystandalone}
-
- In between, simply \input all the modules, possibly adding some
- sectioning hierarchy:
-
- \section[Typechecker-core]{Typechecking the abstract syntax}
- \downsection
- \input{XXXXXXX.lhs}
- \input{YYYYYYY.lhs}
- \upsection
-
- \section[Typechecker-support]{Typechecker: supporting modules}
- \downsection
- \input{AAAAAAAAAAA.lhs}
- \input{BBBBBBBBBBB.lhs}
- \upsection
-
-* To make your Makefile, do:
-
- % jmkmf -P ghc
-
- (because of a bug, you may have to do it twice :-)
-
-* Then do "make depend".
-
-* Now you are ready for business:
-
- % make root.info
-
- or
-
- % make root.dvi
diff --git a/ghc/docs/NOTES.rename b/ghc/docs/NOTES.rename
deleted file mode 100644
index cca29322ec..0000000000
--- a/ghc/docs/NOTES.rename
+++ /dev/null
@@ -1,109 +0,0 @@
-
-
-
-Questions concerning the meaning of hiding in certain contexts:
-
-1) Suppose we have the interface
- interface A where
- data T = B | C
-
- and the module
- module H where
- import A hiding T
-
- Should this be an error (because T isn't an abstract type in the module)
- or does it just mean the same thing as would
- import A hiding (T (B,C))
- or
- import A hiding (T (..))
- (in other words, hide all of T)
- Will require the user to be precise and flag it as an error - otherwise
- the user may not know that the type is not abstract, thinking that it is.
-
-2) Clearly, we can't allow (assuming the interface above)
- module H where
- import A hiding (T (B))
-
- since that means that a data type with a subset of the constructors is
- exported - similarly for classes
-
-3) Suppose an interface exports an abstract type H. Can H be referred
- to as H (..), or is that an error? Let's require precision and call it
- an error.
-
---------------- new design for renamer -------------------
-
-Changes to abstract syntax
-
-1) ClsSigs becomes Sigs
-
-2) Instances need new syntax (bool) distinguishing between those which
-come from an interface and those which come from a module.
-
-The renamer is factored into four passes, as follows:
-
-1) FLATTEN INTERFACES -
- insert original names into interfaces. All of the decls imported
- from the interfaces are collected and returned, in an otherwise
- unchanged module. No interfaces exist after this pass.
-
-2) Do consistency checks (equality). Return the module including the surviving declarations.
-
-3) build the global name function, which will maintain two separate
- namespaces.
-
-4) assign names to the entire module, and do dependency analysis.
-
-As the prelude environments will yield names, the first pass will replace
-QuickStrings with constructors of the ProtoName type, defined as
-
-data ProtoName = Unknown QuickString
- -- note that this is the name local to the module
- | Imported QuickString QuickString QuickString
- | Prelude Name
-
-The parser will initially make all QuickStrings Unknown.
-
-Modules must now include signatures for value decls at top level.
-
-The entire set of passes have the following types:
-
-type PrelNameFuns = (GlobalNameFun, GlobalNameFun)
-
-type GlobalNameFun = ProtoName -> Maybe Name
-
-renameModule :: PrelNameFuns -> ProtoNameModule -> RenameMonad RenamedModule
-
-renameModule1 :: PrelNameFuns -> ProtoNameModule -> RenameMonad ProtoNameModule
-
-processModImports1 :: PrelNameFuns -> ProtoNameImportDecls
- -> RenameMonad (ProtoNameFixityDecls, ProtoNameTyDecls,
- ProtoNameClassDecls, ProtoNameInstDecls,
- ProtoNameSigDecls)
-
-renameModule2 :: ProtoNameModule -> RenameMonad ProtoNameModule
-
-renameModule3 :: PrelNameFuns -> ProtoNameModule -> GlobalNameFun
-
-renameModule4 :: GlobalNameFun -> ProtoNameModule -> RenameMonad RenamedModule
-
-renameModule :: PrelNameFuns -> ProtoNameModule -> RenameMonad RenamedModule
-renameModule pnf mod
- = (renameModule1 pnf mod) `thenRenameM` (\ mod_with_orig_interfaces ->
- (renameModule2 mod_with_orig_interfaces)
- `thenRenameM` (\ mod_minus_interfaces ->
- (renameModule3 pnf mod_minus_interfaces)
- `thenRenameM` (\ global_name_fun ->
- (renameModule4 mod_minus_interfaces global_name_fun))))
-
-Namespace confusion: According to the report (1.1), `An identifier must
-not be used as the name of a type constructor and a class in the same
-scope.' This is apparently the only constraint on the namespace, other
-than those implied by the conventions for identifiers. So, what are the
-namespaces?
-
-1) variables and class operations, constructors
-
-2) type constructors and classes (because of the statement above)
-
-
diff --git a/ghc/docs/NOTES.saving-space b/ghc/docs/NOTES.saving-space
deleted file mode 100644
index cd43c37f64..0000000000
--- a/ghc/docs/NOTES.saving-space
+++ /dev/null
@@ -1,250 +0,0 @@
-Ways to save code space
-~~~~~~~~~~~~~~~~~~~~~~~
-SLPJ/BOS 16 Sept 94
-
-
-
-
-
-Heap entry points
-~~~~~~~~~~~~~~~~~
-We have lots of thunks of the form
-
- let
- x = f p q r
- in ...
-
-where f is a known function of arity 3 (ie saturated).
-At the moment we generate special code for this one closure,
-which:
- pushes an update frame
- loads p,q,r into registers from the closure (or using
- immediate loads if they are literals),
- jumps to f_fast.
-
-Since there are quite a lot of thunks of this form, the idea is to
-generate some code (and its info table) just once, *with the
-definition of f*, which does exactly as described above. We can then
-use this code for every thunk of (exactly) this form. Call this
-the "heap entry" for f:
-
- slow entry: args on stack
- fast entry: args in regs
- heap entry: args in closure pointed to by Node
-
-So the thunk for x would look like this:
-
- -----------------
- x = | * | p | q | r |
- --|--------------
- |
- | common heap entry code for f
- ------> push update frame
- R2 := R1[2] -- Second arg
- R3 := R1[3] -- Third arg
- R1 := R1[1] -- First arg
- goto f_fast
-
-The jump to f_fast can be implemented as a fall-through. (The
-slow entry point can take a jump instead!)
-
-Of course there are also lots of thunks which *aren't* of the heap-entry
-form:
- x = case y of ...
- x = let v = ... in ...
- etc
-
-Things to watch out for:
-
-* Literal args. Consider
-
- x = f 2 p 4
-
-We don't *have* to use the heap entry for f (we could generate special
-code+info table as we do now), but we *can* use it provided we
-generate a thunk with 2 and 4 stored in it as well as p:
-
- -----------------
- | * | 2 | p | 4 |
- --|--------------
- |
- | common heap entry code for f
- ------> push update frame
- R2 := R1[2] -- Second arg
- R3 := R1[3] -- Third arg
- R1 := R1[1] -- First arg
- goto f_fast
-
-(If we have special code the thunk needs only p stored in it, because
-the special code can use immediate constants for 2 and 4:
-
- ---------
- | * | p |
- --|------
- |
- | special code for x
- ----> push update frame
- R2 := R1[1] -- Second arg
- R3 := 4 -- Third arg
- R1 := 2 -- First arg
- goto f_fast
-
-
-* Single-entry thunks. If x is a single-entry thunk, there's no need to
-push an update frame. That suggests:
-
- ---------------
- x = | * | 2 | p | 4 |
- --|------------
- |
- | heap entry code for f
- ----> -- NO! NO! push update frame
- R2 := R1[2] -- Second arg
- R3 := R1[3] -- Third arg
- R1 := R1[1] -- First arg
- goto f_fast
-
-Let's call the two variants the
- standard heap entry
-and no-update heap entry
-
-We can't fall through from the standard heap-entry code (which pushes
-an update frame) to the arg-loading code, because both need an info table.
-We have to take a jump.
-
-For non-exported functions we may be able to see that only one of the
-two heap entries is required.
-
-* Local functions. When f is a *local* (ie not top-level) function, its
-fast-entry convention is that
- R1 = the function closure
- R2.. = the args
-
-For example:
-
- top p q = let
- f = \r -> ..r..p...q...
- in
- let
- x = f q
- in
- ...
-
-
-The shape of the heap-entry closure for f must be
-
- -------------
- x = | * | f | q |
- --|----------
- |
- -------> heap entry code
- must load *f* into R1 as well as q and
- the other args
-
-
-
-
-
-Avoiding generating entries and info tables
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-At present, for every function we generate all of the following,
-just in case. But they aren't always all needed, as noted below:
-
-[NB: all of this applies only to *functions*. Thunks always
-have closure, info table, and entry code.]
-
-
-* Fast-entry code ALWAYS NEEDED
-
-* Slow-entry code
- Needed iff (a) we have any un-saturated calls to the function
- OR (b) the function is passed as an arg
-
-* The function closure
- Needed iff (a) we have any un-saturated calls to the function
- OR (b) the function is passed as an arg
- OR (c) the function has free vars (ie not top level)
-
- Why case (a) here? Because if the arg-satis check fails,
- UpdatePAP stuffs a pointer to the function closure in the PAP.
- [Could be changed; UpdatePAP could stuff in a code ptr instead,
- but doesn't seem worth it.]
-
- [NB: these conditions imply that we might need the closure
- without the slow-entry code. Here's how.
-
- f x y = let g w = ...x..y..w...
- in
- ...(g t)...
-
- Here we need a closure for g which contains x and y,
- but since the calls are all saturated we just jump to the
- fast entry point for g, with R1 pointing to the closure for g.]
-
-
-* Slow-entry info table
- Needed iff (a) we have any un-saturated calls to the function
- OR (b) the function is passed as an arg
- OR (c) the function has free vars (ie not top level)
-
- NB. (c) is only required so that the function closure has
- an info table to point to, to keep the storage manager happy.
- If (c) alone is true we could fake up an info table by choosing
- one of a standard family of info tables, whose entry code just
- bombs out.
-
- If (c) is retained, then we'll sometimes generate an info table
- (for storage mgr purposes) without slow-entry code. Then we need
- to use an error label in the info table to substitute for the absent
- slow entry code.
-
-* Standard heap-entry code
- Standard heap-entry info table
- Needed iff we have any updatable thunks of the standard heap-entry shape.
-
-* Single-update heap-entry code
- Single-update heap-entry info table
- Needed iff we have any non-updatable thunks of the
- standard heap-entry shape.
-
-
-All are needed if the function is exported, just to play safe.
-
-Idea: generate just the stuff we need!
-
-
-
-\begin{code}
-staticClosureRequired -- Assumption: it's a top-level, no-free-var binding
- :: StgBinderInfo
- -> [Id] -- Args
- -> Bool
-staticClosureRequired (StgBinderInfo arg_occ unsat_occ _ _) args
- = arg_occ || -- There's an argument occurrence
- unsat_occ || -- There's an unsaturated call
- null args -- It's a thunk
-
-staticClosureRequired NoStgBinderInfo args = True
-
-
-
-slowFunEntryCodeRequired -- Assumption: it's a function, not a thunk.
- :: StgBinderInfo
- -> Bool
-slowFunEntryCodeRequired (StgBinderInfo arg_occ unsat_occ _ _)
- = arg_occ || -- There's an argument occurrence
- unsat_occ -- There's an unsaturated call
-slowFunEntryCodeRequired NoStgBinderInfo = True
-
-
-funInfoTableRequired -- Assumption: it's a function, not a thunk.
- :: Bool -- Top level?
- -> StgBinderInfo
- -> Bool
-funInfoTableRequired top_level (StgBinderInfo arg_occ unsat_occ _ _)
- = not top_level ||
- arg_occ || -- There's an argument occurrence
- unsat_occ -- There's an unsaturated call
-
-funInfoTableRequired top_level NoStgBinderInfo = True
-\end{code}
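-
-A purely illustrative sketch (not from the compiler source): one way a code
-generator could consult these predicates for a top-level function binding,
-yielding the list of pieces that actually have to be emitted. The function
-and its result strings are hypothetical.
-
-\begin{code}
-piecesRequired :: Bool -> StgBinderInfo -> [Id] -> [String]
-piecesRequired top_level binder_info args
-  =  [ "static closure"        | staticClosureRequired    binder_info args ]
-  ++ [ "slow entry code"       | slowFunEntryCodeRequired binder_info ]
-  ++ [ "slow-entry info table" | funInfoTableRequired top_level binder_info ]
-\end{code}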
diff --git a/ghc/docs/NOTES.update-mechanism b/ghc/docs/NOTES.update-mechanism
deleted file mode 100644
index 5072cd87d5..0000000000
--- a/ghc/docs/NOTES.update-mechanism
+++ /dev/null
@@ -1,195 +0,0 @@
- The Glorious New Update Mechanism
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Simon & Jim Dec 93
-
-Return convention
-~~~~~~~~~~~~~~~~~
-When a constructor returns it makes sure that
-
- R2 contains the info pointer for the constructor
- R1,R3.. contain the components (if return in regs)
- R1 points to the constructor object itself (if return in heap)
-
-The info table for a constructor contains a pointer to the
-constructor's update code. If a constructor returns to an
-update frame, the update frame's code just jumps direct to the
-constructor's update code, via the info pointer in R2.
-
-This slightly penalises the return of a new constructor,
-because we have to load R2 with the info ptr. [Fact: in runs
-of the compiler, 20-30% of all returns are of a new constructor;
-70-80% are existing constructors.]
-
-Info tables
-~~~~~~~~~~~
-Each dynamic-heap-allocated constructor has *two* info tables:
-
-* the "NewCon" info table is put into R2 when returning a new
- constructor, which does not yet exist in the heap; R1 is dead!
- The "NewCon" info table has no GC entries, because it's only ever used
- when returning in regs, never installed in a real constructor.
-
- The NewCon table also needs a valid tag field (see taggery below)
-
-* the "ExistingCon" info table is used for all constructors allocated
- in the heap.
-
-The update code for a constructor
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The update code for a constructor actually performs the update
-right away. [At present, the update is deferred until we get
-back to the case expression.] It knows how to do the update
-because the update code is constructor-specific.
-
-Once it's done the update, it makes R1 point to the constructor object
-in the heap (which'll either be freshly-allocated, if big, or the
-updated thing itself), and (for non-niladic constructors) makes R2 point
-to the "ExistingCon" info table for the constructor. (Of course the new
-constructor will also have an ExistingCon info ptr.) For niladic
-constructors, we do *not* use the "ExistingCon" info table. We continue
-to overwrite updatees in-place, because this saves us an indirection
-prior to garbage collection (and the extra niladic constructors disappear
-during the next garbage collection anyway).
-
-The update code in the ExistingCon info table simply updates with an
-indirection, using R1. I *think* this can be one standard piece of
-code. The only doubt here concerns GC; if updating with an
-indirection can cause GC (possible on GRIP? or generational GC?),
-then we need to know which regs are live. We can solve this by
-putting a liveness mask in the info table too. [Arguably we want
-that anyway; consider returning to the bottom of a stack object.]
-So a liveness mask in the info table is probably a good idea.
-
-Constructors which return in heap return with an ExistingCon info
-ptr. They don't need a NewCon info table at all.
-
-Notice that this means that when we return an *existing* constructor,
-to an update frame, the update is done with an indirection, rather
-than [as now] copying the constructor afresh. This solves the space duplication
-problem which shows up in "clausify".
-
-GC: R1 might be dead; R2 is a non-ptr. So this return convention
-relies on using liveness masks for GC reg-liveness info, not the
-old no-of-live-ptrs info.
-
-Taggery
-~~~~~~~
-
- [Current: For unvectored returns with more than one constructor, we
- currently load TagReg, and scrutinise it in the case expression.
- Worse, we also have to scrutinise TagReg in the update entry of the
- return vector.]
-
-In the new world, updates are handled without any nonsense. No need
-to look at any register, because we just jump to the
-constructor-specific update code.
-
-Since we have an info ptr in R2, we can get the tag out of the info
-table, thus getting rid of TagReg altogether. (This could conceivably
-be a *lose* on a machine with lots of regs, because it replaces an
-immediate small-const load by a memory fetch of the tag from the info
-table.)
-
-Not clear whether this is worth trying to improve. We could:
-
- a) #define TagReg to be a register or an offset from R2
- b) issue a SET_TAG macro in the entry code for a constructor,
- which usually expands to nothing
-
-[NB 75-95% of all returns are vectored in runs of the compiler itself.]
-
-The entry code for a constructor
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The real reason the registers are assigned as above is to make the
-entry code for a constructor simple. When the entry code is executed,
-we have a new entry convention:
-
- R1 points to the object
- R2 is its info pointer
-
-(Why? because we usually enter it by indirecting through its info
-table, so it seems a shame to load the info ptr from memory twice.)
-
-So all the entry code has to do is to return (perhaps vectored-ly).
-(Maybe load TagReg, usually not --- see above.)
-
-NB this entry convention applies, of course, to all thunks as
-well as constructors -- whenever we enter an unknown object via R1 (Node).
-
-Case expressions
-~~~~~~~~~~~~~~~~
-Return vectors no longer need update code.
-
-Unvectored returns can therefore be *direct* to the code,
-rather than *indirect* via a 2-entry vector.
-
-Penalty for this improvement: a "polymorphic" return vector,
-notably the one in an update frame, needs to accommodate either
-a direct or a vectored return. So it has to look like:
-
- UpdVec: jmp UnvectoredUpd
- .word UpdVec0
- .word UpdVec1
- ...
-
-that is, the return vector is offset by some fixed amount
-from the pointer put on the stack. Or, it could be done
-backwards:
-
- ...
- .word UpdVec1
- .word UpdVec0
- UpdVec: ...code for UnvectoredUpd...
-
-and then vectored returns would use negative offsets.
-
-This grunge is necessary *only* for a fixed set of polymorphic return
-vectors, part of the runtime system:
-
- - update frames
- - restore cost centres
- - seq, par
- - thread base
- - stack object base
-
-Case expressions generate either a direct return, or a vector,
-but never a combination.
-
-Update Frames
-~~~~~~~~~~~~~
-
-Standard update frames are still required if we don't know the type of
-the constructor being returned. However, we often do know the type. In
-this case, we can generate a type-specific updating return-vector to place
-in the update frame rather than the StdUpdRetVector. This saves us one
-level of indirection.
-
-Partial applications
-~~~~~~~~~~~~~~~~~~~~
-PAPs are basically handled just the same way as at present.
-
-Changes from now
-~~~~~~~~~~~~~~~~
-* UpdReg dies.
-* TagReg dies.
-* RetVecReg dies. (Previously needed to do return after update.)
-* Return vectors have half the number of entries.
-* Unvectored returns go direct.
-* Polymorphic seq/par and friends.
-* No space duplication problem (cf clausify)
-
-
-Glosses
-~~~~~~~
-Tag and update code are needed only for constructor info tables.
-It seems a shame to take up space in other info tables (ie 99% of them).
-
-Possibilities:
-
-- use an indirection to GC code, so the vari-sized gc stuff becomes
- fixed
-- put the tag/upd code ptrs before the start of the info table. (or
- between the info table and code when reversing info tables...)
-
-
-Looks tricky to me.
diff --git a/ghc/docs/Prefix_Form b/ghc/docs/Prefix_Form
deleted file mode 100644
index 43daaba644..0000000000
--- a/ghc/docs/Prefix_Form
+++ /dev/null
@@ -1,294 +0,0 @@
- Haskell Prefix Form
- ===================
-
-This defines the interface between the Haskell lexer/parser "hsp"
-(written in lex/yacc/C) and the Haskell compiler proper, "hsc"
-(written in Haskell).
-
-White space in the form of spaces, tabs and newlines may occur between
-prefix items (I wouldn't recommend it [WDP]). A literal tab
-terminates a string constant (and by extension also an integer
-constant, character constant, identifier or infix operator).
-
-There is a general list form, where L indicates a Cons node and N
-indicates Nil. Elements in a list may be of arbitrary type.
-
-KH, 22/08/91: Changed for Haskell 1.1+ -- this is where Haskell and LML
- finally part company...
-
-JH, 09/07/92: {0,5,6,I,O,u,v,w} Used in Data Parallel Haskell variant
- (Jonathan Hill, QMW)
-
-WDP, 02/04/93: Added full-blown pragmas.
-WDP, 15/08/93: Added even more full-blown pragmas.
-
-`Tag' namespace already used:
-
-#$%()+,-.012356789:=>@ABCDEGILMNOPQRSTUWXZ_abcdefghijklmnopstuvwxyz~
-
- Meta-Syntactic Items
- --------------------
-
-Lists (l)
----------
-
-L el l List constructor el : l
-N Null List []
-
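-For example (purely illustrative, reading the two rows above): a list of
-two elements e1 and e2 would be written  L e1 L e2 N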
-
-Strings (s)
------------
-
-#chars<TAB> String of characters before <TAB>
-
-
-
-
- Syntactic Items
- ---------------
-
-Expressions or Patterns (e or p)
---------------------------------
-
-M L s s b [op] [ix] [ei]
- Haskell module:
- (name, file, binding, fixities, imports, exports)
-
-4 s Integer Constant 0, 1, ...
-H s Unboxed Integer constant 0#, 1#, ... /* WDP */
-F s Floating Point Constant 0.1, 0.2e-3, ...
-J s Unboxed Double Constant 0.1##, 0.2e-3##, ... /* WDP */
-K s Unboxed Float Constant 0.1#, 0.2e-3#, ... /* WDP */
-C s Character Constant '...'
-P s Unboxed character constant ???????? /* WDP */
-S s String Constant "..."
-V s String# Constant "..."# /* WDP */
-Y s "Literal C" Constant ``printf'' /* WDP */
-I s "no rep" Integer (unfoldings)
-R s s "no rep" Rational (numerator, denominator)
-s s "no rep" String constant (unfoldings)
-
-i id Identifiers
-C literal Literal constant
-
-a e1 e2 Function application (e1 e2)
-@ e1 id e2 Infix Application e1 `op` e2
-( e id Left section (e op)
-) id e Right Section (op e)
-
-l L [p] e Lambda Expressions \ p1 ... pn -> e
-c e [pb] Case Expression case e of pb1 ... pbn
-b e1 e2 e3 If expression if e1 then e2 else e3
-E b e Let Expression let b in e
-
-: [e] Explicit List [ e1, ..., en ]
-Z e [q] List Comprehension [ e | q ]
-. e1 [e2] [e3] Enumeration (e2/e3 may be []) [e1,e2..e3]
-
-, [e] Explicit Tuple ( e1, ..., en )
-
-R e t Type Constraint e :: t
-
-- e Negation - e
-
-j id s [e] C Call/Asm (ccall/asm id/str e1 ... en)
- s == "n" --> ccall, non-ptrs only
- s == "p" --> ccall, ptrs allowed
- s == "N" --> casm, non-ptrs only
- s == "P" --> casm, ptrs allowed
-k s e Set Cost Centre (scc s e)
-
-s id p As Pattern id @ p
-~ p Irrefutable Pattern ~ p
-+ p e Plus Pattern n + k
- /*WDP: why a "p" on the plus pat? (KH: historical reasons) */
-_ Wildcard Pattern _
-
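-For example (purely illustrative, on one reading of the tables above, with
-optional spaces between prefix items): the application  f x  would appear
-as  a i #f<TAB> i #x<TAB>  where <TAB> denotes a literal tab.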
-
-
-Qualifiers (q)
---------------
-
-G p e Generator p <- e
-g e Guard e
-
-Bindings (b)
-------------
-
-t L C [id] t [d] iprag DataType Binding data C => t = d1 | ... | dn
- deriving (id1, ..., idm)
-n L t1 t2 iprag Type Synonym type t1 = t2
-p L [pb] Pattern Binding pb1 ; ... ; pbn
-f L [pb] Function Binding pb1 ; ... ; pbn
-A b1 b2 Multiple Definitions b1 ; b2
-$ L C t b iprag Class class C => t [where b]
-% L C id t b iprag Instance instance C => id t [where b]
-D L [ty] Default default (ty1, ..., tyn)
-
-St L [id] t iprag Type Signature id1, ...., idn :: t
-Ss L id [t] Pragma: value specialis'ns {-# SPECIALISE id :: t1, ... tn #-}
-SS L id t Pragma: instance specialis'n {-# SPECIALISE instance id t #-}
-Si L id [id] Pragma: inline -- id [howto] {-# INLINE id [id]{0,1} #-}
-Su L id Pragma: magic unfolding {-# MAGIC_UNFOLDING id #-}
-Sa L id Pragma: abstract synonym {-# ABSTRACT id #-}
-
-7 L id [ei] [rn] Import module (Interface only) import id (eis) renaming rn
-B null binding
-
-
-Fixity declarations (op)
---------------
-/* WDP: most suspicious how these don't appear in interfaces */
-/* WDP: need line numbers? */
-
- s1 s2 s3 s1 is the operator name
- s2 is either "infix", "infixl" or "infixr"
- s3 is the precedence
-
-Types (t)
----------
-
-T id [t] Type Constructor id t1 ... tn
-: t Explicit List Type [t]
-, [t] Explicit Tuple Type (t1, ..., tn)
-> t1 t2 Explicit Function Type t1 -> t2
-y id Type Variable id
-3 C t Type Context C => t
-
-2A id t "UniDict": class, type (core unfoldings only)
-2B id "UniTyVarTemplate"
-2C [id] t "UniForall" tv1 ... tvN => type
-
-2D Nothing (Maybe UniType)
-2E t Just t (ditto)
-
-Contexts (C)
-------------
-
- [t] Haskell context: t1, ..., tn
-
-
-Data Types (d)
---------------
-
-1 L id [t] Data constructor id st1 ... stn
-
-
-Pattern Bindings (pb)
----------------------
-
-W L id p [ge] b Single grhs p | g1 = e1 ... where b
- (L,id) = (Line,Function)
-
-
-Guarded Expressions (ge)
-------------------------
-
- g e g | e (unguarded comes in with an
- unsavoury (Var "_otherwise") `guard')
-
-
-Identifiers (id)
-----------------
-
- s An identifier is just a string
-
-
-Import declarations (ix)
-------------------------
-
-e L s id [ei] [rn] b Line, File Name, Module name, imported entities,
- renamings, interface body
-h L s id [ei] [rn] b Hiding clause, as above...
-
-
-Renamings (rn)
---------------
-
- id id From name, to name
-
-
-Exported/Imported Identifiers (ei)
----------------------------------
-
-x id ExImport Variable
-X id ExImport Constructor/Type/Class
-z id ExImport Class/Type plus all ops/cons
-8 id [id] ExImport Type(C1,..Cn)
-9 id [id] ExImport Class(o1,..on)
-m id Export Module
-
-
-Interface pragmas (iprag)
--------------------------
-
- User pragmas come in as "signatures"; see Bindings.
-
-PN Null/empty pragma
-
-Pd [d] Data pragma: otherwise-hidden data constructors
-Pt Type pragma: synonym is *abstract*
-Pc [gprag] Class pragma: one gprag per superclass
-Po gprag gprag Class op pragma: gprags for dsel & defm
- OUT: Pv gprag [2prag] Value pragma: gprag + specialisations
-Pis id gprag Instance pragma (simple): id is module name, gprag for dfun
-Pic id gprag [1prag] Ditto (constant methods): also gprags for classops
-PiS id gprag [3prag] Ditto (specialised): also (type, instance-prag) pairs
-
-Pg Aprag uprag Sprag Uprag
- General ("gprag"): arity, update, strictness, unfolding
-
-PA id Arity ("Aprag"): arity
-Pu id Update ("uprag"): update spec
-PS id gprag Strictness ("Sprag"): strictness spec, gprag for worker
-PM id id Magic unfolding ("Uprag"): var, magic-name string
-PU id core Unfolding ("Uprag"): "guidance", corexpr-unfolding
-
-Px Unfold always (these are "guidance" ones...)
-Py id id Unfold if applied to "m" ty args, "n" val args
-Pz id id Unfold if applied to "m" ty args, & cons in marked val positions
- (e.g., "CXXCXX" ==> in 1st & 4th arg positions)
-
-P1 id gprag ("1prag"): (name, gen-pragma) pair
-P2 t gprag ("2prag"): (type, gen-pragma) pair
-P3 t gprag [iprag] ("3prag"): (type, [(gen-pragma, instance-pragma)]) pair
-
-
-Core syntax [in iprags] (core)
-------------------------------
-
-Fa
-Fb
-<etc -- lots of this stuff>
-
-Used in Data Parallel Haskell variant (Jonathan Hill, QMW)
-----------------------------------------------------------
-
- ** Expressions **
-
-5 e [parqual] Parallel ZF expression << e | [parquals] >>
-6 [e] Pod Literal << e1,...,ek>>
-O [e] e Processor (|e1,...,ek;e|)
-
- ** Types **
-
-u [t] t Explicit Processor Type (|t1,..tn;t|)
-v [t] Explicit Pod Type <<t>>
-
- ** Parallel Qualifiers **
-
-0 e e Drawn From Generator exp <<- exp
-w e e Indexed From Generator exp <<= exp
-I e Guard
-
-
- Other Items
- -----------
-
-Line Numbers (L)
-----------------
-
- s Haskell line number
-
-
-Kevin Hammond @ 22nd. August 1991
diff --git a/ghc/docs/README b/ghc/docs/README
index d9c06dde52..464f301aa4 100644
--- a/ghc/docs/README
+++ b/ghc/docs/README
@@ -2,8 +2,6 @@ Herein are bits of documentation for, or related to, the Glorious
Glasgow Haskell compilation system. Unless specified otherwise, they
are nestled in the ghc/docs directory of the distribution.
-== semi-proper documentation ==========================================
-
install_guide/*
A step-by-step guide on how to configure, build, and install
the system.
@@ -18,54 +16,16 @@ release_notes/*
Release notes for the system. What's new in each release, and
what's coming Real Soon Now.
-io-1.3/*
- The *draft* Haskell 1.3 I/O proposal at December 1994. In
- HTML format. Unstructured.html gives you the whole thing in
- one big wad.
-
-state-interface.dvi
+state_interface/*
"GHC prelude: types and operations", an addendum to the Peyton
Jones/Launchbury "state" paper, is the definitive reference
(bar the compiler source :-) of GHC's lowest-level interfaces
(primitive ops, etc.).
-add_to_compiler/paper.dvi
- An overview of how to add a piece to the compiler.
+gransim/
+ User's guide for the GranSim parallel-machine simulator.
+ By Hans Wolfgang Loidl.
simple-monad.lhs
A *simple* introduction to the common use of monads in Haskell
programming. No fancy stuff whatsoever. By Will Partain.
-
-../../mkworld/docs/mkworld_guide/*
- A guide to the ``make world'' configuration system ... ``for
- the brave.'' The "configure" script (versions 0.22ff) make
- this a little less visible than before.
-
-../../literate/doc/*
- The documentation for the ``literate programming'' stuff, if
- you're interested.
-
-== relevant papers and abstracts ======================================
-
-Consult ghc/docs/abstracts/abstracts<year>.tex for information about
-Glasgow work related to the GHC system. Other relevant material is
-listed here. All of it is available by FTP.
-
-Haskell report, version 1.2 (the latest)
- It was in your May, 1992, SIGPLAN Notices. Not in the
- distribution but in ~ftp/pub/haskell/report/ (the usual
- places).
-
-Haskell tutorial, by Paul Hudak and Joe Fasel
- Ditto. In ~ftp/pub/haskell/tutorial/; usual places.
-
-== notes and things ===================================================
-
-NOTES.* Random collections of notes on topics *. Check the
- modification dates to see what's new... Don't believe
- everything you read.
-
-MAIL* Files holding some relevant correspondence.
-
-README files
- A few of these actually exist and tell the truth.
diff --git a/ghc/docs/abstracts/README b/ghc/docs/abstracts/README
deleted file mode 100644
index a3c07a8729..0000000000
--- a/ghc/docs/abstracts/README
+++ /dev/null
@@ -1,4 +0,0 @@
-A straight copy of ~grasp/docs/abstracts/*.{tex,sty}.
-
-Will Partain
-partain@dcs.glasgow.ac.uk
diff --git a/ghc/docs/abstracts/abstracts.sty b/ghc/docs/abstracts/abstracts.sty
deleted file mode 100644
index 0965be647e..0000000000
--- a/ghc/docs/abstracts/abstracts.sty
+++ /dev/null
@@ -1,30 +0,0 @@
-\newcounter{refnumber}[section]
-
-\renewcommand{\therefnumber}{\arabic{refnumber}}
-
-\newcommand{\reference}[4]{ % authors, title, details, abstract
-\refstepcounter{refnumber}
-\large
-{\bf \therefnumber.} #1, {\em #2}, #3.
-\normalsize
-\begin{quote}
-#4
-\end{quote}
-\vspace{0.2in}
-}
-
-\newcommand{\shortreference}[3]{ % authors, title, details
-\large
-$\bullet$ #1, {\em #2}, #3.
-}
-
-
-\newcommand{\GlasgowNinetyTwo}[1]
- {Functional Programming, Glasgow 1992, Springer Verlag Workshops in Computing}
-\newcommand{\GlasgowNinetyThree}[1]
- {Glasgow Functional Programming Group Workshop, Ayr, July 1993}
-\newcommand{\GlasgowNinetyOne}
- {Functional Programming, Glasgow 1991, Springer Verlag Workshops in Computing}
-
-% \newcommand{\Haskell}[1]{{\sc Haskell}}
-
diff --git a/ghc/docs/abstracts/abstracts89.tex b/ghc/docs/abstracts/abstracts89.tex
deleted file mode 100644
index e4fe15e542..0000000000
--- a/ghc/docs/abstracts/abstracts89.tex
+++ /dev/null
@@ -1,487 +0,0 @@
-\documentstyle[11pt,slpj]{article}
-
-\newcommand{\reference}[4]{ % authors, title, details, abstract
-\large
-#1, {\em #2}, #3.
-\normalsize
-\begin{quotation}
-#4
-\end{quotation}
-\vspace{0.2in}
-}
-
-\newcommand{\Haskell}[1]{{\sc Haskell}}
-
-\begin{document}
-
-\title{Abstracts of GRIP/GRASP-related papers and reports till 1989\\
-Dept of Computing Science \\
-University of Glasgow G12 8QQ}
-
-\author{
-Cordelia Hall (cvh@cs.glasgow.ac.uk) \and
-Kevin Hammond (kh@cs.glasgow.ac.uk) \and
-Will Partain (partain@cs.glasgow.ac.uk) \and
-Simon L Peyton Jones (simonpj@cs.glasgow.ac.uk) \and
-Phil Wadler (wadler@cs.glasgow.ac.uk)
-}
-
-\maketitle
-
-\begin{abstract}
-We present a list of papers and reports related to the GRIP
-and GRASP projects,
-covering {\em the design, compilation technology,
-and parallel implementations of functional programming languages, especially
-\Haskell{}}.
-
-Most of them can be obtained by writing to
-Teresa Currie, Dept of Computing Science,
-University of Glasgow G12 8QQ, UK. Her electronic mail address is
-teresa@uk.ac.glasgow.cs.
-
-Those marked ($\spadesuit$) can be obtained from the School of Information
-Systems, University of East Anglia, Norwich, UK.
-Those marked ($\clubsuit$) can be obtained from Chris Clack at the
-Department of Computer Science, University College London, Gower St,
-London WC1E 6BT, UK.
-\end{abstract}
-
-\section{Published papers}
-
-\reference{Simon L Peyton Jones and Jon Salkild}
-{The Spineless Tagless G-machine}
-{Proc IFIP Symposium on Functional Programming Languages and Computer
-Architecture, London, Sept 1989}
-{
-The Spineless Tagless G-machine is an abstract machine based on graph
-reduction, designed as a target for compilers for non-strict functional
-languages.
-As its name implies, it is a development of earlier work, especially
-the G-machine and Tim.
-
-It has a number of unusual features: the abstract machine code is
-rather higher-level than is common, allowing better code generation;
-the representation of the graph eliminates most interpretive overheads;
-vectored returns from data structures give fast case-analysis;
-and the machine is readily extended for a parallel implementation.
-
-The resulting implementation runs at least 75\% faster
-than the Chalmers G-machine.
-}
-
-\reference{Simon L Peyton Jones}
-{Parallel implementations of functional programming languages}
-{Computer Journal 32(2), pp175-186, April 1989}
-{
-It is now very nearly as easy to build a parallel computer
-as to build a sequential one, and there are strong incentives to do so:
-parallelism seems to offer the opportunity to improve both the
-absolute performance level and the cost/performance ratio of our machines.
-
-One of the most attractive features of functional programming languages
-is their suitability for programming such parallel computers.
-This paper is devoted to a discussion of this claim.
-
-First of all, we discuss parallel functional programming
-from the programmer's point of view.
-Since most parallel functional language implementations are based on graph reduction,
-we proceed to a discussion of some implementation issues raised by parallel
-graph reduction.
-The paper concludes with a case study of a particular parallel graph reduction
-machine, GRIP, and a brief survey of other similar machines.
-}
-
-\reference{Kevin Hammond}
-{Implementing Functional Languages for Parallel Machines}
-{PhD thesis, University of East Anglia, 1989 ($\spadesuit$)}
-{Commencing with the Standard ML language, dialects XSML and PSML are
-defined, which permit parallel evaluation of functional programs. XSML
-is Standard ML with a novel mechanism for handling exceptions; PSML is a
-side-effect free version of XSML. A formal semantics for PSML and a
-translation algorithm from this language into Dactl, a compiler target
-language based on the theory of graph-rewriting, are presented. The
-thesis proves that a simplified version of this translation preserves
-meaning for flat domains, and that the strategy for reduction to normal
-form is correct.
-
-The implementation of standard compilation techniques such as strictness
-analysis, maximal free sub-expression elision and common sub-expression
-elimination is considered with respect to Dactl, and problems
-highlighted. Techniques are also presented for compiling
-exception-handling correctly in a parallel environment, and for
-compiling side-effects for a parallel machine. Metrics for performance
-evaluation are defined, and results obtained using the Dactl reference
-interpreter are presented.}
-
-
-\reference{Simon L Peyton Jones, Chris Clack and Jon Salkild}
-{High-performance parallel graph reduction}
-{Proc Parallel Architectures and Languages Europe (PARLE), LNCS 365, pp193-207,
-July 1989}
-{
-Parallel graph reduction is an attractive implementation for functional
-programming languages because of its simplicity and inherently distributed
-nature.
-This paper outlines some of the issues raised by parallel compiled
-graph reduction, and presents the solutions we have adopted to produce an
-efficient implementation.
-
-We concentrate on two particular issues:
-the efficient control of parallelism, resulting in an ability to alter
-the granularity of parallelism
-{\em dynamically};
-and the efficient use of the memory hierarchy to improve locality.
-}
-
-\reference
-{Phil Trinder and Philip Wadler}
-{Improving list comprehension database queries}
-{{\em TENCON '89\/} (IEEE Region 10 Conference),
-Bombay, India, November 1989.}
-{
-The task of increasing the efficiency of database queries has received
-considerable attention. In this paper we describe the improvement of
-queries expressed as list comprehensions in a lazy functional
-language. The database literature identifies four algebraic and two
-implementation-based improvement strategies. For each strategy we show
-an equivalent improvement for queries expressed as list
-comprehensions. This means that well-developed database algorithms
-that improve queries using several of these strategies can be emulated
-to improve comprehension queries. We are also able to improve queries
-which require greater power than that provided by the relational
-algebra. Most of the improvements entail transforming a simple,
-inefficient query into a more complex, but more efficient form. We
-illustrate each improvement using examples drawn from the database
-literature.
-}
-
-\reference{Kevin Hammond}
-{Exception Handling in a Parallel Functional Language: PSML}
-{Proc TENCON '89, Bombay, India, Nov 1989}
-{
-Handling exception occurrences during computation is a problem in most
-functional programming languages, even when the computation is eager and
-sequential. This paper presents a version of the error value method
-which allows lazy computation with deterministic semantics for parallel
-evaluation even in the presence of errors. The realisation of this
-technique is illustrated by reference to PSML, a referentially
-transparent variant of Standard ML designed for parallel evaluation.
-}
-
-\reference
-{Philip Wadler}
-{Theorems for free!}
-{{\em 4'th International Conference on Functional Programming
-Languages and Computer Architecture}, London, September 1989.}
-{
-From the type of a polymorphic function we can derive a theorem
-that it satisfies. Every function of the same type satisfies the same
-theorem. This provides a free source of useful theorems,
-courtesy of Reynolds' abstraction theorem for the polymorphic lambda
-calculus.
-}
-
-\reference
-{Philip Wadler and Stephen Blott}
-{How to make {\em ad-hoc\/} polymorphism less {\em ad hoc}}
-{{\em 16'th ACM Symposium on Principles of Programming Languages},
-Austin, Texas, January 1989.}
-{
-This paper presents {\em type classes}, a new approach to {\em
-ad-hoc\/} polymorphism. Type classes permit overloading of arithmetic
-operators such as multiplication, and generalise the ``eqtype variables''
-of Standard ML.
-Type classes extend the Hindley\X Milner polymorphic type system, and
-provide a new approach to issues that arise in object-oriented
-programming, bounded type quantification, and abstract data types.
-This paper provides an informal introduction to type classes, and
-defines them formally by means of type inference rules.
-}
-
-\reference{Kevin Hammond}
-{Implementing Type Classes for Haskell}
-{Proc Glasgow Workshop on Functional Programming, Fraserburgh, Aug 1989}
-{
-This paper describes the implementation of the type class mechanism for
-the functional language Haskell, which has been undertaken at Glasgow
-University. A simple introduction to type classes discusses the methods
-used to select operators and dictionaries in the Glasgow Haskell
-compiler. A solution to the problem of selecting super-class
-dictionaries, not considered by the original paper on type classes, is
-also presented. The modifications which must be made to the standard
-Hindley/Milner type-checking algorithm to permit the translation of
-operators are described, and a revised definition of algorithm W is
-provided. Finally, a set of performance figures compares the run-time
-efficiency of Haskell and LML programs, indicating the overhead inherent
-in the original, naive method of operator selection, and the improvement
-which may be obtained through simple optimisations.
-}
-
-\reference{Simon L Peyton Jones}
-{FLIC - a functional language intermediate code}
-{SIGPLAN Notices 23(8) 1988, revised 1989}
-{
-FLIC is a Functional Language Intermediate Code, intended to
-provide a common intermediate language between diverse
-implementations of functional languages, including parallel
-ones.
-This paper gives a formal definition of FLIC's syntax and
-semantics, in the hope that its existence may encourage greater
-exchange of programs and benchmarks between research groups.
-}
-
-\reference{Simon L Peyton Jones, Chris Clack, Jon Salkild, Mark Hardie}
-{Functional programming on the GRIP multiprocessor}
-{Proc IEE Seminar on Digital Parallel Processors, Lisbon, Portugal, 1988}
-{
-Most MIMD computer architectures can be classified as
-tightly-coupled or loosely-coupled,
-depending on the relative latencies seen by a processor accessing different
-parts of its address space.
-
-By adding microprogrammable functionality to the memory units, we have
-developed a MIMD computer architecture which explores the middle region
-of this spectrum.
-This has resulted in an unusual and flexible bus-based multiprocessor,
-which we are using as a base for our research in parallel functional programming
-languages.
-
-In this paper we introduce parallel functional programming, and describe
-the architecture of the GRIP multiprocessor.
-}
-
-\reference{Geoffrey Burn, Simon L Peyton Jones, and John Robson}
-{The spineless G-machine}
-{Proc ACM Conference on Lisp and Functional Programming, Snowbird, pp244-258,
-August 1988}
-{
-Recent developments in functional language implementations have
-resulted in the G-machine, a programmed graph-reduction machine.
-Taking this as a basis, we introduce an optimised method of
-performing graph reduction, which does not need to build the
-spine of the expression being reduced.
-This Spineless G-machine only updates shared expressions, and
-then only when they have been reduced to weak head normal form.
-It is thus more efficient than the standard method of performing
-graph reduction.
-
-We begin by outlining the philosophy and key features of the
-Spineless G-machine, and comparing it with the standard
-G-machine.
-Simulation results for the two machines are then presented and
-discussed.
-
-The Spineless G-machine is also compared with Tim, giving a
-series of transformations by which they can be interconverted.
-These open up a wide design space for abstract graph reduction
-machines, which was previously unknown.
-
-A full specification of the machine is given in the appendix,
-together with compilation rules for a simple functional language.
-}
-
-\reference{Chris Hankin, Geoffrey Burn, and Simon L Peyton Jones}
-{A safe approach to parallel combinator reduction}
-{Theoretical Computer Science 56, pp17-36, North Holland, 1988}
-{
-In this paper we present the results of two pieces of work which, when
-combined, allow us to take a program text of a functional language and
-produce a parallel implementation of that program.
-We present the techniques for discovering sources of parallelism in
-a program at compile time, and then show how this parallelism is
-naturally mapped onto a parallel combinator set that we will define.
-
-To discover sources of parallelism in a program, we use
-{\em abstract interpretation}, a compile-time technique which is used
-to gain information about a program which may then be used to optimise
-the program's execution.
-A particular use of abstract interpretation is in
-{\em strictness analysis}
-of functional programs.
-In a language that has lazy semantics, the main potential for parallelism
-arises in the evaluation of arguments of strict operators.
-
-Having identified the sources of parallelism at compile time, it is
-necessary to communicate these to the run-time system.
-In the second part of the paper we introduce an extended set of combinators,
-including some parallel combinators, to achieve this purpose.
-}
-
-\reference{Simon L Peyton Jones}
-{GRIP - a parallel processor for functional languages}
-{Electronics and Power, pp633-636, Oct 1987;
-also in ICL Technical Journal 5(3), May 1987}
-{
-A brief 4-page article about the GRIP architecture.
-}
-
-\reference{Simon L Peyton Jones, Chris Clack, Jon Salkild, and Mark Hardie}
-{GRIP - a high-performance architecture for parallel graph reduction}
-{Proc IFIP conference on Functional Programming Languages and
-Computer Architecture, Portland,
-ed Kahn, Springer Verlag LNCS 274, pp98-112, Sept 1987}
-{
-GRIP is a high-performance parallel machine designed to execute
-functional programs using supercombinator graph reduction.
-It uses a high-bandwidth bus to provide access to a
-large, distributed shared memory, using intelligent memory units and
-packet-switching protocols to increase the number of processors
-which the bus can support.
-GRIP is also being programmed to support parallel Prolog and
-DACTL.
-
-We outline GRIP's architecture and firmware, discuss the major design
-issues, and describe the current state of the project and
-our plans for the future.
-}
-
-\reference{Simon L Peyton Jones and Chris Clack}
-{Finding fixpoints in abstract interpretation}
-{in Abstract Interpretation of Declarative Languages,
-ed Hankin \& Abramsky, Ellis Horwood, pp246-265, 1987.}
-{
-Abstract interpretation is normally used as the basis for
-a static, compile-time analysis of a program.
-For example, strictness analysis attempts to establish which
-functions in the program are strict (we will use strictness
-analysis as a running example).
-
-Using abstract interpretation in this way requires the
-compile-time evaluation of expressions in the abstract domain.
-It is obviously desirable that this evaluation should
-always terminate, since otherwise the compiler would risk
-non-termination.
-In the case of non-recursive functions there is no problem, and
-termination is guaranteed.
-Recursive functions, however, present more of a problem, and it
-is the purpose of this paper to explain the problem and to
-offer some practical solutions.
-}
-
-\reference{Chris Clack and Simon L Peyton Jones}
-{The four-stroke reduction engine}
-{Proc ACM Conference on Lisp and Functional Programming,
-Boston, pp220-232, Aug 1986}
-{
-Functional languages are widely claimed to be amenable to concurrent
-execution by multiple processors. This paper presents an algorithm for
-the parallel graph reduction of a functional program.
-The algorithm supports transparent management of parallel
-tasks with no explicit
-communication between processors.
-}
-
-\reference{Simon L Peyton Jones}
-{Functional programming languages as a software engineering tool}
-{in Software Engineering - the critical decade D Ince,
-Peter Peregrinus, pp124-151, 1986}
-{
-It is the purpose of this paper to suggest that functional
-languages are an appropriate tool for supporting the activity
-of programming in the large, and to present a justification of
-this claim.
-}
-
-\reference{Simon L Peyton Jones}
-{Using Futurebus in a fifth generation computer architecture}
-{Microprocessors and Microsystems 10(2), March 1986}
-{
-Despite the bandwidth limitations of a bus, we present a design
-for a parallel computer (GRIP) based on Futurebus, which limits bus
-bandwidth requirements by using intelligent memories.
-
-Such a machine offers higher performance than a uniprocessor
-and lower cost than a more extensible multiprocessor, as well
-as serving as a vehicle for research in parallel architectures.
-}
-
-
-\section{Internal reports}
-
-\reference{Kevin Hammond and John Glauert}
-{Implementing Pattern-Matching Functional Languages using Dactl}
-{University of Glasgow, 1989}
-{
-This paper describes the implementation of a family of pattern-matching
-functional languages in the parallel graph-rewriting language Dactl.
-Attention is focussed on the direct implementation of the
-pattern-matching constructs in the context of various reduction
-strategies: eager, lazy, and lazy with strictness analysis. Two new
-reduction strategies combining lazy evaluation with a technique for
-compiling non-overlapping patterns are also illustrated. The latter
-strategies provide improved termination properties compared with
-conventional functional language implementations for non-overlapping
-patterns. The implementations described here cover all pattern-matching
-constructs found in Standard ML, including named patterns and deep
-patterns. The use of Dactl renders explicit the complexities of
-pattern-matching which are obscured by implementation in a conventional
-intermediate language or abstract machine.
-}
-
-\reference{Simon L Peyton Jones}
-{A practical technique for designing asynchronous finite-state machines}
-{Proc Glasgow Workshop on Functional Programming, Fraserburgh,Aug 1989}
-{
-The literature on asynchronous logic design is mostly of a fairly theoretical
-nature. We present a practical technique for generating asynchronous finite-state
-machines from a description of their states and transitions. The technique
-has been used successfully to design a number of state machines in
-the GRIP multiprocessor.
-}
-
-\reference{Kevin Hammond}
-{A Proposal for an Implementation of Full Dactl on a Meiko Transputer Rack}
-{SYS-C89-02, University of East Anglia, 1989}
-{
-The design of an abstract machine instruction set for Dactl is
-described. The instruction set is sufficient to encapsulate all Dactl
-constructs; it will also permit parallel execution where applicable.
-The paper considers the difficulties involved in the implementation of
-this abstract instruction set on the UEA Meiko M40 transputer rack,
-using a ZAPP-style kernel. Part of the code for a simulation of this
-instruction set is included as an appendix to the report.
-}
-
-\reference{Chris Clack}
-{Tuning the four-stroke reduction engine}
-{University College London, January 1989 ($\clubsuit$)}
-{
-This paper analyses the current implementation of the four-stroke reduction
-engine (a virtual machine for parallel graph reduction).
-The current implementation is shown to be inefficient, and a number of
-enhancements are suggested.
-This paper proposes that major performance benefits will accrue from
-increasing the intelligence of the memory units and giving them a more
-important role in the four-stroke cycle.
-}
-
-\reference{Chris Clack}
-{Performance cost accounting for GRIP}
-{University College London, January 1989 ($\clubsuit$)}
-{
-This paper presents a general model for efficiency analysis of shared-memory
-parallel graph reduction architectures.
-The efficiency of the GRIP implementation of the four-stroke reduction engine
-is subsequently analysed by apportioning costs to the various components
-of the general model.
-
-In particular, attention is focussed on two aspects: execution
-profiling, and analysis of resource utilisation.
-}
-
-\reference{Chris Clack}
-{Diagnosis and cure for dislocated spines}
-{University College London, June 1988 ($\clubsuit$)}
-{
-Locality of reference is a key issue for parallel machines, and especially
-for parallel implementations of graph reduction.
-If locality can be achieved then communications costs fall,
-and we are better able to exploit distributed architectures.
-This paper analyses a particular implementation of graph reduction --
-the four-stroke reduction engine -- and introduces the concept of
-spine-locality as a basis for graph building and task-scheduling strategies
-that enhance locality.
-}
-
-\end{document}
diff --git a/ghc/docs/abstracts/abstracts90.tex b/ghc/docs/abstracts/abstracts90.tex
deleted file mode 100644
index 4bf6c657f0..0000000000
--- a/ghc/docs/abstracts/abstracts90.tex
+++ /dev/null
@@ -1,153 +0,0 @@
-\documentstyle[11pt,slpj,abstracts]{article}
-
-\begin{document}
-
-\title{Abstracts of GRIP/GRASP-related papers and reports, 1990
-}
-
-\author{The GRASP team \\ Department of Computing Science \\
-University of Glasgow G12 8QQ
-}
-
-\maketitle
-
-\begin{abstract}
-We present a list of papers and reports related to the GRIP
-and GRASP projects,
-covering {\em the design, compilation technology,
-and parallel implementations of functional programming languages, especially
-\Haskell{}}.
-
-Most of them can be obtained by FTP. Connect to {\tt ftp.dcs.glasgow.ac.uk},
-and look in {\tt pub/glasgow-fp/papers}, {\tt pub/glasgow-fp/drafts}, {\tt pub/glasgow-fp/tech\_reports},
-or {\tt pub/glasgow-fp/grasp-and-aqua-docs}.
-
-They can also be obtained by writing to
-Alexa Stewart, Department of Computing Science,
-University of Glasgow G12 8QQ, UK. Her electronic mail address is
-alexa@dcs.glasgow.ac.uk.
-\end{abstract}
-
-\section{Published papers}
-
-\reference
-{Philip Wadler}
-{Comprehending monads}
-{{\em ACM Conference on Lisp and Functional Programming},
-Nice, France, pp.\ 61--78, June 1990.}
-{
-Category theorists invented {\em monads\/} in the 1960's
-to concisely express certain aspects of universal algebra.
-Functional programmers invented {\em list comprehensions\/}
-in the 1970's to concisely express certain programs involving lists.
-This paper shows how list comprehensions may be generalised
-to an arbitrary monad, and how the resulting programming feature
-can concisely express in a pure functional language some
-programs that manipulate state,
-handle exceptions, parse text, or invoke continuations.
-A new solution to the old problem
-of destructive array update is also presented.
-No knowledge of category theory is assumed.
-}
-
-\reference
-{Philip Wadler}
-{Linear types can change the world!}
-{{\em IFIP TC 2 Working Conference on Programming
-Concepts and Methods}, Sea of Galilee, Israel, April 1990.}
-{
-The linear logic of J.-Y.~Girard suggests a new type
-system for functional languages, one which supports operations
-that ``change the world''.
-Values belonging to a linear type must be used exactly once:
-like the world, they cannot be duplicated or destroyed.
-Such values require no reference counting or garbage collection,
-and safely admit destructive array update.
-Linear types extend Schmidt's notion of single threading;
-provide an alternative to Hudak and Bloss' update analysis;
-and offer a practical complement to Lafont and Holmstr\"om's elegant
-linear languages.
-}
-
-\reference{K Hammond and SL Peyton Jones}
-{Some early experiments on the GRIP parallel reducer}
-{Proc Nijmegen Workshop on Parallel Implementations of Functional Languages, TR 90-16, Dept
-of Informatics, University of Nijmegen, ed Plasmeijer, 1990, pp51-72}
-{
-GRIP is a multiprocessor designed to execute functional programs in
-parallel using graph reduction. We have implemented a compiler for
-GRIP, based on the Spineless Tagless G-machine
-and can now run parallel functional programs with substantial absolute
-speedup over the same program running on a uniprocessor Sun.
-
-Parallel functional programming shifts some of the burden of resource
-allocation from the programmer to the system. Examples of such
-decisions include: when to create a new concurrent activity (or {\em
-thread}), when to execute such threads, where to execute them, and so
-on.
-
-It is clearly desirable that the system should take such decisions,
-{\em provided it does
-a good enough job}. For example, a paged virtual memory system
-almost always does an adequate job, and a programmer very seldom
-has to interfere with it.
-The big question for parallel functional programming is whether good
-resource-allocation strategies exist, and how well they perform under a
-variety of conditions.
-
-Now that we have an operational system, we are starting to carry out
-experiments to develop resource-allocation strategies, and measure
-their effectiveness. This paper reports on some very preliminary
-results. They mainly concern the question of when, or even whether,
-to create a new thread. This is an aspect which has so far received
-little attention --- existing work has focused mainly
-on load sharing rather than on thread creation.
-}
-
-
-\section{Technical reports}
-
-\reference
-{Simon L Peyton Jones and Philip Wadler}
-{A static semantics for \Haskell{}}
-{Dept of Computing Science, University of Glasgow}
-{
-This paper gives a static semantics for a large subset of \Haskell{}, including
-a translation into a language without overloading.
-It is our intention to cover the complete language in due course.
-
-One innovative aspect is the use of ideas from the second-order lambda
-calculus to record type information in the program.
-
-The paper is long (40 pages) and is more of a reference document than
-a narrative one.
-}
-
-\reference
-{Philip Wadler}
-{A simple type inference algorithm}
-{Dept of Computing Science, University of Glasgow}
-{
-This program is intended as a showcase for Haskell's
-literate programming facility and for the monadic style
-of programming. It implements Hindley-Milner type inference.
-Monads are used for parsing and to simplify ``plumbing'' in the type
-checker. The monads for parsing, exceptions, and state as well
-as the routines for unparsing are designed to be of general utility.
-}
-
-\reference{The Grasp team}
-{The Glasgow Haskell I/O system}
-{Dept of Computing Science, University of Glasgow, Nov 1991}
-{
-Most input/output systems for non-strict functional languages
-feature a rather large ``operating system''.
-The Glasgow Haskell system implements input and output
-very largely within Haskell itself, without the conventional
-enclosing ``printing mechanism''. This paper explains how the
-IO system works in some detail.
-}
-
-\end{document}
-
-
diff --git a/ghc/docs/abstracts/abstracts91.tex b/ghc/docs/abstracts/abstracts91.tex
deleted file mode 100644
index 913007e3ba..0000000000
--- a/ghc/docs/abstracts/abstracts91.tex
+++ /dev/null
@@ -1,232 +0,0 @@
-\documentstyle[11pt,slpj,abstracts]{article}
-
-\begin{document}
-
-\title{Abstracts of GRIP/GRASP-related papers and reports, 1991
-}
-
-\author{The GRASP team \\ Department of Computing Science \\
-University of Glasgow G12 8QQ
-}
-
-\maketitle
-
-\begin{abstract}
-We present a list of papers and reports related to the GRIP
-and GRASP projects,
-covering {\em the design, compilation technology,
-and parallel implementations of functional programming languages, especially
-\Haskell{}}.
-
-Most of them can be obtained by FTP. Connect to {\tt ftp.dcs.glasgow.ac.uk},
-and look in {\tt pub/glasgow-fp/papers}, {\tt pub/glasgow-fp/drafts}, {\tt pub/glasgow-fp/tech\_reports},
-or {\tt pub/glasgow-fp/grasp-and-aqua-docs}.
-
-They can also be obtained by writing to
-Alexa Stewart, Department of Computing Science,
-University of Glasgow G12 8QQ, UK. Her electronic mail address is
-alexa@dcs.glasgow.ac.uk.
-\end{abstract}
-
-\section{Published papers}
-
-\reference
-{Simon L Peyton Jones and David Lester}
-{A modular fully-lazy lambda lifter in \Haskell{}}
-{{\em Software Practice and Experience}, 21(5) (May 1991)}
-{An important step in many compilers for functional languages is
-{\em lambda lifting}. In his thesis, Hughes showed that by doing lambda
-lifting in a particular way, a useful property called {\em full laziness}
-can be preserved;
-full laziness has been seen as intertwined with
-lambda lifting ever since.
-
-We show that, on the contrary, full laziness can be regarded as a completely
-separate process to lambda lifting, thus making it easy to use different
-lambda lifters following a full-laziness transformation, or to use
-the full-laziness transformation in compilers which do not require lambda
-lifting.
-
-On the way, we present the complete code for our modular fully-lazy
-lambda lifter, written in the \Haskell{} functional programming language.
-}
-
-\reference{Simon L Peyton Jones and Mark Hardie}
-{A Futurebus interface from off-the-shelf parts}
-{IEEE Micro, Feb 1991}
-{
-As part of the GRIP project we have designed a Futurebus interface using
-off-the-shelf parts.
-We describe our implementation, which is unusual in its use of fully
-asynchronous finite-state machines.
-Based on this experience we draw some lessons for future designs.
-}
-
-\reference{Simon L Peyton Jones and John Launchbury}
-{Unboxed values as first class citizens}
-{Functional Programming Languages and Computer Architecture (FPCA), Boston,
-ed Hughes, Springer LNCS 523, Sept 1991, pp636--666}
-{The code compiled from a non-strict functional program usually
-manipulates heap-allocated {\em boxed} numbers.
-Compilers for such languages often go to considerable trouble to
-optimise operations on boxed numbers into simpler operations
-on their unboxed forms. These optimisations are usually handled
-in an {\em ad hoc} manner in
-the code generator, because earlier phases of the compiler have
-no way to talk about unboxed values.
-
-We present a new approach, which makes unboxed values into (nearly) first-class
-citizens. The language, including its type system, is extended to
-handle unboxed values. The optimisation of boxing and unboxing operations
-can now be reinterpreted as a set of correctness-preserving program
-transformations. Indeed the particular transformations
-required are ones which a compiler would want to implement anyway.
-The compiler becomes both simpler and more modular.
-
-Two other benefits accrue.
-Firstly, the results of strictness analysis can be exploited within
-the same uniform transformational framework.
-Secondly, new algebraic data types with
-unboxed components can be declared. Values of these types can be
-manipulated much more efficiently than the corresponding boxed versions.
-
-Both a static and a dynamic semantics are given for the augmented language.
-The denotational dynamic semantics is notable for its use of
-{\em unpointed domains}.
-}
-
-\reference{Philip Wadler}
-{Is there a use for linear logic?}
-{ACM/IFIP Symposium on Partial Evaluation
-and Semantics Based Program Manipulation (PEPM), Yale
-University, June 1991}
-{
-Past attempts to apply Girard's linear logic have either had a clear
-relation to the theory (Lafont, Holmstr\"om, Abramsky) or a clear
-practical value (Guzm\'an and Hudak, Wadler), but not both. This paper
-defines a sequence of languages based on linear logic that span the gap
-between theory and practice. Type reconstruction in a linear type
-system can derive information about sharing. An approach to linear type
-reconstruction based on {\em use types\/} is presented. Applications
-to the {\em array update\/} problem are considered.
-}
-
-\reference{Simon L Peyton Jones}
-{The Spineless Tagless G-machine: a second attempt}
-{Proc Workshop on Parallel Implementations of Functional Languages,
-University of Southampton, ed Glaser \& Hartel, June 1991}
-{The Spineless Tagless G-machine is an abstract machine designed
-to support functional languages. This presentation of the machine
-falls into two parts. Firstly, we present the {\em STG language},
-an austere but recognisably-functional language, which as well as
-a {\em denotational} meaning has a well-defined {\em operational} semantics.
-The STG language is the ``abstract machine code'' for the Spineless
-Tagless G-machine, but it is sufficiently abstract that it can readily be
-compiled into G-machine Gcode or TIM code instead.
-
-Secondly, we discuss the mapping of the STG language onto stock hardware.
-The success of an abstract machine model depends largely on how efficient
-this mapping can be made, though this topic is often relegated to a short
-section. Instead, we give a detailed discussion of the design issues and
-the choices we have made. Our principal target is the C language, treating
-the C compiler as a portable assembler.
-
-A revised version is in preparation for the Journal of Functional Programming.
-}
-
-\reference{Gert Akerholt, Kevin Hammond, Simon Peyton Jones and Phil Trinder}
-{A parallel functional database on GRIP}
-{\GlasgowNinetyOne{}, pp1-24}
-{
-GRIP is a shared-memory multiprocessor designed for efficient parallel
-evaluation of functional languages, using compiled graph reduction.
-In this paper, we consider the feasibility of implementing a database
-manager on GRIP, and present results obtained from a pilot
-implementation. A database implemented in a pure functional language
-must be modified {\em non-destructively}, i.e.\ the original database
-must be preserved and a new copy constructed. The naive
-implementation provides evidence for the feasibility of a pure
-functional database in the form of modest real-time speed-ups, and
-acceptable real-time performance. This performance can be tentatively
-compared with results for existing machines running a more
-sophisticated database benchmark.
-The functional database is also used to investigate the GRIP
-architecture, compared with an idealised machine. The particular
-features investigated are the thread-creation costs and caching of
-GRIP's distributed memory.
-}
-
-\reference{PM Sansom}
-{Combining single-space and two-space compacting garbage collectors}
-{\GlasgowNinetyOne{}, pp312-324}
-{The garbage collector presented in this paper makes use of
-two well known compaction garbage collection algorithms with very
-different performance characteristics: Cheney's two-space copying
-collector and Jon\-ker's single-space sliding compaction collector. We
-propose a scheme which allows either collector to be used. The
-run-time memory requirements of the program being executed are used to
-determine the most appropriate collector. This enables us to achieve a
-fast collector for heap requirements less than half of the heap memory
-but allows the heap utilization to increase beyond this threshold.
-Using these ideas we develop a particularly attractive extension to
-Appel's generational collector.
-}
-
-\reference{PM Sansom}
-{Dual-mode garbage collection}
-{Proc Workshop on the Parallel Implementation of Functional Languages, Southampton,
-ed Glaser \& Hartel, pp283-310}
-{
-The garbage collector presented in this paper makes use of two well
-known compaction garbage collection algorithms with very different
-performance characteristics: Cheney's two-space copying collector and
-Jonker's sliding compaction collector. We propose a scheme which
-allows either collector to be used. The run-time memory requirements
-of the program being executed are used to determine the most
-appropriate collector. This enables us to achieve a fast collector for
-heap requirements less than half of the heap memory but allows the
-heap utilization to increase beyond this threshold. Using these ideas
-we develop a particularly attractive extension to Appel's generational
-collector.
-
-We also describe a particularly fast implementation of the garbage
-collector which avoids interpreting the structure and current state of
-closures by attaching specific code to heap objects. This code {\em
-knows} the structure and current state of the object and performs the
-appropriate actions without having to test any flag or arity fields.
-The result is an implementation of these collection schemes which does
-not require any additional storage to be associated with the heap
-objects.
-
-This paper is an earlier, and fuller, version of ``Combining
-single-space and two-space compacting garbage collectors'' above.
-}
-
-\reference{K Hammond}
-{Efficient type inference using monads}
-{\GlasgowNinetyOne{}, pp146-157}
-{{\em Efficient} type inference algorithms are based on
-graph-rewriting techniques. Consequently, at first sight they seem
-unsuitable for functional language implementation. In fact, most
-compilers written in functional languages use substitution-based
-algorithms, at a considerable cost in performance. In this paper, we
-show how monads may be used to transform a substitution-based inference
-algorithm into one using a graph representation. The resulting
-algorithm is faster than the corresponding substitution-based one.}
-
-
-\section{Technical reports}
-
-\reference{The Grasp team}
-{The Glasgow Haskell I/O system}
-{Dept of Computing Science, University of Glasgow, Nov 1991}
-{
-Most input/output systems for non-strict functional languages
-feature a rather large ``operating system''.
-The Glasgow Haskell system implements input and output
-very largely within Haskell itself, without the conventional
-enclosing ``printing mechanism''. This paper explains how the
-IO system works in some detail.
-}
-
-\end{document}
diff --git a/ghc/docs/abstracts/abstracts92.tex b/ghc/docs/abstracts/abstracts92.tex
deleted file mode 100644
index 6c25d665bc..0000000000
--- a/ghc/docs/abstracts/abstracts92.tex
+++ /dev/null
@@ -1,292 +0,0 @@
-\documentstyle[11pt,slpj,abstracts]{article}
-\begin{document}
-
-% ======================================================
-
-\title{Abstracts of GRIP/GRASP-related papers and reports, 1992
-}
-
-\author{The GRASP team \\ Department of Computing Science \\
-University of Glasgow G12 8QQ
-}
-
-\maketitle
-
-\begin{abstract}
-We present a list of papers and reports related to the GRIP
-and GRASP projects,
-covering {\em the design, compilation technology,
-and parallel implementations of functional programming languages, especially
-\Haskell{}}.
-
-Most of them can be obtained by FTP. Connect to {\tt ftp.dcs.glasgow.ac.uk},
-and look in {\tt pub/glasgow-fp/papers}, {\tt pub/glasgow-fp/drafts}, {\tt pub/glasgow-fp/tech\_reports},
-or {\tt pub/glasgow-fp/grasp-and-aqua-docs}.
-
-They can also be obtained by writing to
-Alexa Stewart, Department of Computing Science,
-University of Glasgow G12 8QQ, UK. Her electronic mail address is
-alexa@dcs.glasgow.ac.uk.
-\end{abstract}
-
-\section{Book}
-
-\reference{Simon Peyton Jones and David Lester}
-{Implementing functional languages}
-{Prentice Hall, 1992}
-{
-This book gives a practical approach to understanding implementations
-of non-strict functional languages using lazy graph reduction.
-
-An unusual feature of the book is that the text of each chapter is
-itself a directly-executable Miranda(TM) program, constituting a
-minimal but complete compiler and interpreter for a particular
-abstract machine. The complete source code for the book, and a
-Tutor's Guide containing solutions to the exercises, is available in
-machine-readable form by network file transfer (FTP).
-
-Written to allow the reader to modify, extend and experiment with the
-implementations provided in the text, this book will help to make a
-course on functional-language implementation ``come alive''.
-
-{\bf Contents}. The Core Language. Template instantiation. The G-machine.
-The Three Instruction Machine. A parallel G-machine. Lambda lifting
-and full laziness. Appendices. Bibliography. Index.
-}
-
-
-\section{Published papers}
-
-\reference{Simon L Peyton Jones}
-{Implementing lazy functional languages on stock hardware:
-the Spineless Tagless G-machine}
-{Journal of Functional Programming 2(2) (Apr 1992)}
-{The Spineless Tagless G-machine is an abstract machine designed
-to support non-strict higher-order
-functional languages. This presentation of the machine
-falls into three parts. Firstly, we give a general discussion of
-the design issues involved in implementing non-strict functional
-languages.
-
-Next, we present the {\em STG language},
-an austere but recognisably-functional language, which as well as
-a {\em denotational} meaning has a well-defined {\em operational} semantics.
-The STG language is the ``abstract machine code'' for the Spineless
-Tagless G-machine.
-
-Lastly, we discuss the mapping of the STG language onto stock hardware.
-The success of an abstract machine model depends largely on how efficient
-this mapping can be made, though this topic is often relegated to a short
-section. Instead, we give a detailed discussion of the design issues and
-the choices we have made. Our principal target is the C language, treating
-the C compiler as a portable assembler.
-
-This paper used to be called ``The Spineless Tagless G-machine: a second
-attempt'', but has been retitled and substantially expanded with new
-material which tries to set the machine in the context of compiler
-technology for other languages. The paper is very long (65 pages) and
-has an index.
-}
-
-\reference{Philip Wadler}
-{The essence of functional programming}
-{Invited talk, 19'th Annual Symposium on Principles of
-Programming Languages, Santa Fe, New Mexico, Jan 1992}
-{
-This paper explores the use of monads to structure functional programs.
-No prior knowledge of monads or category theory is required.
-
-Monads increase the ease with which programs may be modified. They can
-mimic the effect of impure features such as exceptions, state, and
-continuations; and also provide effects not easily achieved with such
-features. The types of a program reflect which effects occur.
-
-The first section is an extended example of the use of monads. A
-simple interpreter is modified to support various extra features: error
-messages, state, output, and non-deterministic choice. The second
-section describes the relation between monads and continuation-passing
-style. The third section sketches how monads are used in a compiler
-for Haskell that is written in Haskell.
-}
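-
-To make the flavour of the first section concrete, here is a minimal
-sketch (ours, not the paper's) of a monadic evaluator in modern Haskell
-notation, using the Maybe monad to model errors; changing the monad
-changes the effect without restructuring the evaluator.
-
-\begin{verbatim}
--- A sketch of a monadic evaluator; the paper's own notation differs.
-data Term = Con Int | Div Term Term
-
-eval :: Term -> Maybe Int
-eval (Con n)   = return n
-eval (Div t u) = do
-  a <- eval t
-  b <- eval u
-  if b == 0 then Nothing else return (a `div` b)
-
--- eval (Div (Con 6) (Con 2)) == Just 3
--- eval (Div (Con 1) (Con 0)) == Nothing
-\end{verbatim}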
-
-\reference{A Santos and SL Peyton Jones}
-{On program transformation and the Glasgow Haskell compiler}
-{\GlasgowNinetyTwo{}, pp240-251}
-{We describe a series of program transformations that are implemented
-in the Glasgow Haskell Compiler. They are semantics-preserving
-transformations, suitable for automatic application by a compiler.
-We describe the transformations, how they interact, and their impact
-on the time/space behaviour of some programs.}
-
-\reference{P Sansom and SL Peyton Jones}
-{Profiling lazy functional languages}
-{\GlasgowNinetyTwo{}, pp227-239}
-{Profiling tools, which measure and display the dynamic space
-and time behaviour of programs, are essential for identifying
-execution bottlenecks. A variety of such tools exist for conventional
-languages, but almost none for non-strict functional languages. There
-is a good reason for this: lazy evaluation means that the program is
-executed in an order which is not immediately apparent from the source
-code, so it is difficult to relate dynamically-gathered statistics
-back to the original source.
-
-We present a new technique which solves this problem. The framework is
-general enough to profile both space and time behaviour. Better still,
-it is cheap to implement, and we describe how to do so in the
-context of the Spineless Tagless G-machine.
-}
-
-\reference{CV Hall, K Hammond, WD Partain, SL Peyton Jones, and PL Wadler}
-{The Glasgow Haskell Compiler: a retrospective}
-{\GlasgowNinetyTwo{}, pp62-71}
-{We've spent much of our time over the last
-two years implementing a new compiler for the functional language Haskell.
-In this effort, we've been joined by Andy Gill, who has implemented a
-strictness analyser, Andre Santos, who has contributed a `simplifier', and
-Patrick Sansom, who wrote garbage collectors for our runtime system.
-
-This paper describes some of the things we have learned, what we might
-do differently, and where we go from here.
-}
-
-\reference{D King and PL Wadler}
-{Combining monads}
-{\GlasgowNinetyTwo{}, pp134-143}
-{Monads provide a way of structuring functional programs.
-Most real applications require a combination of primitive monads.
-Here we describe how some monads may be combined with others to
-yield a {\em combined monad}.}
-
-\reference{J Launchbury, A Gill, RJM Hughes, S Marlow, SL Peyton Jones, and PL Wadler}
-{Avoiding unnecessary updates}
-{\GlasgowNinetyTwo{}, pp144-153}
-{Graph reduction underlies most implementations of lazy functional
-languages, allowing separate computations to share results when
-sub-terms are evaluated. Once a term is evaluated, the node of the
-graph representing the computation is {\em updated} with the value of
-the term. However, in many cases, no other computation requires this
-value, so the update is unnecessary. In this paper we take some steps
-towards an analysis for determining when these updates may be omitted.
-}
-
-\reference{S Marlow and PL Wadler}
-{Deforestation for higher-order functions}
-{\GlasgowNinetyTwo{}, pp154-165}
-{Deforestation is an automatic transformation scheme for functional
-programs which attempts to remove unnecessary intermediate data
-structures. The algorithm presented here is a variant of the original,
-adapted for a higher order language. A detailed description of how
-this may be implemented in an optimising compiler is also given.
-}
-
-\reference{WD Partain}
-{The nofib benchmark suite of Haskell programs}
-{\GlasgowNinetyTwo{}, pp195-202}
-{This position paper describes the need for, make-up of, and
-``rules of the game'' for a benchmark suite of Haskell programs. (It
-does not include results from running the suite.) Those of us working
-on the Glasgow Haskell compiler hope this suite will encourage sound,
-quantitative assessment of lazy functional programming systems. This
-version of this paper reflects the state of play at the initial
-pre-release of the suite.
-}
-
-\reference{PL Wadler}
-{Monads for functional programming}
-{Proceedings of the Marktoberdorf Summer School on Programming Calculi,
-ed M Broy, July-August 1992, Springer Verlag}
-{The use of monads to structure functional programs is
-described. Monads provide a convenient framework for simulating
-effects found in other languages, such as global state, exception
-handling, output, or non-determinism. Three case studies are looked at
-in detail: how monads ease the modification of a simple evaluator;
-how monads act as the basis of a datatype of arrays subject to in-place
-update; and how monads can be used to build parsers.
-}
-
-\reference{K Hammond, P Trinder, P Sansom and D McNally}
-{Improving persistent data manipulation for functional languages}
-{\GlasgowNinetyTwo{}, pp72-85}
-{Although there is a great deal of academic interest in
-functional languages, there are very few large-scale functional
-applications. The poor interface to the file system seems to be a
-major factor preventing functional languages being used for
-large-scale programming tasks. The interfaces provided by some
-widely-used languages are described and some problems encountered with
-using these interfaces to access persistent data are discussed. Three
-means of improving the persistent data manipulation facilities of
-functional languages are considered: an improved interface to the file
-system, including a good binary file implementation; an interface to a
-database; and the provision of orthogonal persistence. Concrete
-examples are given using the functional programming language, Haskell.
-}
-
-\reference{Kevin Hammond and Simon Peyton Jones}
-{Profiling scheduling strategies on the GRIP parallel reducer}
-{Proc 1992 Workshop on Parallel Implementations of Functional Languages, Aachen,
-ed Kuchen, Sept 1992}
-{It is widely claimed that functional languages are particularly
-suitable for programming parallel computers. A claimed advantage is
-that the programmer is not burdened with details of task creation,
-placement, scheduling, and synchronisation, these decisions being
-taken by the system instead.
-
-Leaving aside the question of whether a pure functional language is
-expressive enough to encompass all the parallel algorithms we might
-wish to program, there remains the question of how effectively the
-compiler and run-time system map the program onto a real parallel
-system, a task usually carried out mostly by the programmer. This is
-the question we address in our paper.
-
-We first introduce the system architecture of GRIP, a shared-memory
-parallel machine supporting an implementation of the functional
-language Haskell. GRIP executes functional programs in parallel using
-compiled supercombinator graph reduction, a form of
-declarative rule system.
-
-We then describe several strategies for run-time resource
-control which we have tried, presenting comprehensive measurements of
-their effectiveness. We are particularly concerned with strategies
-controlling task creation, in order to improve task granularity and
-minimise communication overheads. This is, so far as we know, one of
-the first attempts to make a systematic study of task-control
-strategies in a high-performance parallel functional-language system.
-GRIP's high absolute performance renders these results credible for
-real applications.
-}
-
-\section{Technical reports}
-
-\reference{CV Hall, K Hammond, SL Peyton Jones, PL Wadler}
-{Type classes in Haskell}
-{Department of Computing Science, University of Glasgow}
-{This paper defines a set of type inference rules for resolving
-overloading introduced by type classes. Programs including type
-classes are transformed into ones which may be typed by the
-Hindley-Milner inference rules. In contrast to other work on type
-classes, the rules presented here relate directly to user programs. An
-innovative aspect of this work is the use of second-order lambda
-calculus to record type information in the program.
-}
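-
-As a concrete (and much simplified) illustration of the transformation
-described above, here is a sketch of a dictionary-passing translation in
-modern Haskell; the names and the record encoding are ours, not the
-paper's.
-
-\begin{verbatim}
--- Source program (with a class):
---   class MyEq a where eq :: a -> a -> Bool
---   member :: MyEq a => a -> [a] -> Bool
---   member x xs = any (eq x) xs
-
--- Translated program: the class becomes a record of methods and the
--- constraint becomes an explicit dictionary argument.
-data MyEqDict a = MyEqDict { eq :: a -> a -> Bool }
-
-member :: MyEqDict a -> a -> [a] -> Bool
-member d x xs = any (eq d x) xs
-
-dictEqInt :: MyEqDict Int
-dictEqInt = MyEqDict { eq = (==) }
-
--- member dictEqInt 3 [1,2,3]  ==  True
-\end{verbatim}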
-
-\shortreference{CV Hall}
-{A transformed life: introducing optimised lists automatically}
-{submitted to FPCA 93}
-{}
-
-\shortreference{K Hammond}
-{The Spineless Tagless G-machine --- NOT!}
-{submitted to FPCA 93}
-{}
-
-\shortreference{CV Hall}
-{An optimists view of Life}
-{submitted to Journal of Functional Programming, 1993}
-{}
-% ~cvh/Public/Papers/An_Optimists_View.dvi
-
-\end{document}
-
-
-
-
-
diff --git a/ghc/docs/abstracts/abstracts93.tex b/ghc/docs/abstracts/abstracts93.tex
deleted file mode 100644
index fa15bebdde..0000000000
--- a/ghc/docs/abstracts/abstracts93.tex
+++ /dev/null
@@ -1,326 +0,0 @@
-\documentstyle[11pt,slpj,abstracts]{article}
-
-\begin{document}
-
-% ======================================================
-
-\title{Abstracts of GRIP/GRASP/AQUA-related papers and reports, 1993
-}
-
-\author{The AQUA team \\ Department of Computing Science \\
-University of Glasgow G12 8QQ
-}
-
-\maketitle
-
-\begin{abstract}
-We present a list of papers and reports related to the GRIP, GRASP and AQUA projects,
-covering {\em the design, compilation technology,
-and parallel implementations of functional programming languages, especially
-\Haskell{}}.
-
-Most of them can be obtained by FTP. Connect to {\tt ftp.dcs.glasgow.ac.uk},
-and look in {\tt pub/glasgow-fp/papers}, {\tt pub/glasgow-fp/drafts}, {\tt pub/glasgow-fp/tech\_reports},
-or {\tt pub/glasgow-fp/grasp-and-aqua-docs}.
-
-They can also be obtained by writing to
-Alexa Stewart, Department of Computing Science,
-University of Glasgow G12 8QQ, UK. Her electronic mail address is
-alexa@dcs.glasgow.ac.uk.
-\end{abstract}
-
-\section{Published papers}
-
-\reference{CV Hall}
-{Using overloading to express distinctions}
-{Information Processing Letters (to appear)}
-{
-Evaluators, also called ``interpreters'', play a variety of roles
-in the study of programming languages. Given this, it's surprising
-that we don't have a better framework for developing evaluators and
-specifying their relationship to each other. This paper
-shows that type classes in Haskell provide an excellent
-framework for exploring relationships between evaluators, using
-abstract interpretation as a motivating example.
-}
-
-\reference{A Gill, J Launchbury and SL Peyton Jones}
-{A short cut to deforestation}
-{ACM Conference on Functional Programming and Computer Architecture, Copenhagen, pp223-232}
-{Lists are often used as ``glue'' to connect
-separate parts of a program together.
-We propose an automatic technique for
-improving the efficiency of such programs,
-by removing many of these intermediate lists,
-based on a single, simple, local transformation.
-We have implemented the method in the Glasgow Haskell compiler.
-}
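-
-The single local transformation the paper is based on is the
-foldr/build rule.  The sketch below uses the names later adopted in
-GHC's libraries; the paper's own presentation differs in detail.
-
-\begin{verbatim}
-{-# LANGUAGE RankNTypes #-}
--- Producers are written with build, consumers with foldr; the rule
---   foldr k z (build g)  ==>  g k z
--- then removes the intermediate list.
-build :: (forall b. (a -> b -> b) -> b -> b) -> [a]
-build g = g (:) []
-
--- An enumeration written as a producer:
-upTo :: Int -> Int -> [Int]
-upTo lo hi = build (\c n ->
-  let go i = if i > hi then n else i `c` go (i + 1)
-  in  go lo)
-
-total :: Int
-total = foldr (+) 0 (upTo 1 10)  -- 55; after the rule, no list is built
-\end{verbatim}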
-
-\reference{P Sansom and SL Peyton Jones}
-{Generational garbage collection for Haskell}
-{ACM Conference on Functional Programming and Computer Architecture, Copenhagen, pp106-116}
-{This paper examines the use of generational garbage collection
-techniques for a lazy implementation of a non-strict functional
-language. Detailed measurements which demonstrate that a generational
-garbage collector can substantially out-perform non-generational
-collectors, despite the frequency of write operations in the
-underlying implementation, are presented.
-
-Our measurements are taken from a state-of-the-art compiled
-implementation for Haskell, running substantial benchmark programs.
-We make measurements of dynamic properties (such as object lifetimes)
-which affect generational collectors, study their interaction with a
-simple generational scheme, make direct performance comparisons with
-simpler collectors, and quantify the interaction with a paging system.
-
-The generational collector is demonstrably superior. At least for our
-benchmarks, it reduces the net storage management overhead, and it
-allows larger programs to be run on a given machine before thrashing
-ensues.}
-
-\reference{J Launchbury}
-{Lazy imperative programming}
-{Proc ACM Sigplan Workshop on State in Programming Languages, Copenhagen, June 1993 (available as
-YALEU/DCS/RR-968, Yale University), pp46-56}
-{
-In this paper we argue for the importance of lazy state, that is,
-sequences of imperative (destructive) actions in which the actions are
-delayed until their results are required. This enables state-based
-computations to take advantage of the control power of lazy evaluation.
-We provide some examples of its use, and describe an implementation
-within Glasgow Haskell.
-}
-
-\reference{G Akerholt, K Hammond, P Trinder and SL Peyton Jones}
-{Processing transactions on GRIP, a parallel graph reducer}
-{Proc Parallel Architectures and Languages Europe (PARLE), Munich, June 1993, pp634-647}
-{
-The GRIP architecture allows efficient execution of functional
-programs on a multi-processor built from standard hardware components.
-State-of-the-art compilation techniques are combined with
-sophisticated runtime resource-control to give good parallel
-performance. This paper reports the results of running GRIP on an
-application which is apparently unsuited to the basic functional
-model: a database transaction manager incorporating updates as well as
-lookup transactions. The results obtained show good relative speedups
-for GRIP, with real performance advantages over the same application
-executing on sequential machines.
-}
-
-\reference{SL Peyton Jones and PL Wadler}
-{Imperative functional programming}
-{ACM conference on Principles of Programming Languages, Charleston, Jan 1993}
-{We present a new model, based on monads, for performing input/output
-in a non-strict, purely functional language. It
-is composable, extensible, efficient, requires no extensions
-to the type system, and extends smoothly to incorporate mixed-language
-working and in-place array updates.
-}
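-
-A tiny fragment in the monadic style the paper advocates (written in
-modern Haskell notation rather than the paper's), showing how I/O
-actions compose sequentially while the program itself remains purely
-functional:
-
-\begin{verbatim}
-main :: IO ()
-main = do
-  putStr "What is your name? "
-  name <- getLine
-  putStrLn ("Hello, " ++ name)
-\end{verbatim}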
-
-\reference{J Launchbury}
-{An operational semantics for lazy evaluation}
-{ACM conference on Principles of Programming Languages, Charleston, Jan 1993}
-{We define an operational semantics for lazy evaluation which provides
-an accurate model for sharing. The only computational structure
-we introduce is a set of bindings which corresponds closely to a
-heap. The semantics is set at a considerably higher level of abstraction
-than operational semantics for particular abstract machines, so is
-more suitable for a variety of proofs. Furthermore, because a heap
-is explicitly modelled, the semantics provides a suitable framework
-for studies about space behaviour of terms under lazy evaluation.
-}
-
-\reference{SL Peyton Jones, CV Hall, K Hammond, WD Partain, and PL Wadler}
-{The Glasgow Haskell compiler: a technical overview}
-{JFIT Technical Conference, Keele, March 1993}
-{We give an overview of the Glasgow Haskell compiler,
-focusing especially on the way in which we have been able
-to exploit the rich theory of functional languages to give
-very practical improvements in the compiler.
-
-The compiler is portable, modular, generates good code, and is
-freely available.
-}
-
-\reference{PL Wadler}
-{A syntax for linear logic}
-{Mathematical Foundations of
-Programming Language Semantics, New Orleans, April 1993}
-{There is a standard syntax for Girard's linear logic, due
-to Abramsky, and a standard semantics, due to Seely. Alas, the
-former is incoherent with the latter: different derivations of
-the same syntax may be assigned different semantics. This paper
-reviews the standard syntax and semantics, and discusses the problem
-that arises and a standard approach to its solution. A new solution
-is proposed, based on ideas taken from Girard's Logic of Unity.
-The new syntax is based on pattern matching, allowing for concise
-expression of programs.}
-
-\reference{SL Peyton Jones, J Hughes, J Launchbury}
-{How to give a good research talk}
-{SIGPLAN Notices 28(11), Nov 1993, 9-12}
-{
-Giving a good research talk is not easy. We try to identify some things
-which we have found helpful, in the hope that they may be useful to you.
-}
-
-
-\section{Workshop papers and technical reports}
-
-The 1993 Glasgow Functional Programming Workshop papers exist in
-the draft proceedings at the moment. They are being refereed, and will
-be published by Springer Verlag in due course.
-
-\reference{DJ King and J Launchbury}
-{Lazy Depth-First Search and Linear Graph Algorithms in Haskell}
-{\GlasgowNinetyThree{}}
-{
-Depth-first search is the key to a wide variety of graph algorithms.
-In this paper we explore the implementation of depth first search in a
-lazy functional language. For the first time in such languages we
-obtain a linear-time implementation. But we go further. Unlike
-traditional imperative presentations, algorithms are constructed from
-individual components, which may be reused to create new
-algorithms. Furthermore, the style of program is quite amenable to
-formal proof, which we exemplify through a calculational-style proof
-of a strongly-connected components algorithm.
-
-{\em This paper has been submitted to Lisp \& Functional Programming 1994.}
-}
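-
-To give the flavour of the approach, here is a much simplified sketch
-(ours, not the paper's): each depth-first tree is generated lazily and
-then pruned against the set of vertices already visited.  Using a list
-for that set makes this version quadratic; the paper obtains linear
-time by threading a mutable array through the same structure.
-
-\begin{verbatim}
-import Data.Array (Array, (!))
-
-type Vertex = Int
-type Graph  = Array Vertex [Vertex]
-data Tree a = Node a [Tree a]
-
-dfs :: Graph -> [Vertex] -> [Tree Vertex]
-dfs g vs = fst (prune [] (map generate vs))
-  where
-    -- A (possibly infinite) tree of all paths from v.
-    generate v = Node v (map generate (g ! v))
-
-    -- Cut each tree off at vertices seen before, threading the
-    -- visited set from children to siblings.
-    prune seen []               = ([], seen)
-    prune seen (Node v ts : us)
-      | v `elem` seen           = prune seen us
-      | otherwise               =
-          let (ts', seen')  = prune (v : seen) ts
-              (us', seen'') = prune seen' us
-          in  (Node v ts' : us', seen'')
-\end{verbatim}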
-
-\reference{K Hammond, GL Burn and DB Howe}
-{Spiking your caches}
-{\GlasgowNinetyThree{}}
-{
-Despite recent advances, predicting the performance of functional
-programs on real machines remains something of a black art. This
-paper reports on one particularly unexpected set of results where
-small variations in the size of a dynamic heap occasionally gave rise
-to 50\% differences in CPU performance. These performance {\em
-spikes} can be traced to the cache architecture of the machine being
-benchmarked, the widely-used Sun Sparcstation~1. A major contribution
-of our work is the provision of a tool which allows cache conflicts
-to be located by the type of access (heap, stack etc.). This can
-be used to improve the functional language implementation, or to
-study the memory access patterns of a particular program.
-}
-
-\reference{S Marlow}
-{Update avoidance analysis}
-{\GlasgowNinetyThree{}}
-{
-A requirement of lazy evaluation is that the value of any
-subexpression in the program is calculated no more than once. This is
-achieved by updating an expression with its value, once computed. The
-problem is that updating is a costly operation, and experimentation
-has shown that it is only necessary in about 30\% of cases (that is,
-70\% of expressions represent values that are only ever required once
-during execution). The aim of the analysis presented in this paper is
-to discover expressions that do not need to be updated, and thus
-reduce the execution time of the program. The analysis has been
-implemented in the Glasgow Haskell Compiler, and results are given.
-
-FTP: {\tt pub/glasgow-fp/authors/Simon\_Marlow/update-avoidance.ps.gz}
-}
-
-\reference{SL Peyton Jones and WD Partain}
-{Measuring the effectiveness of a simple strictness analyser}
-{\GlasgowNinetyThree{}}
-{
-A lot has been written about strictness analysis for non-strict
-functional programs, usually in the hope that the results of the
-analysis can be used to reduce runtime. On the other hand, few papers
-present measurements of how well it works in practice. Usually, all
-that is presented are the run-times of a variety of (usually small)
-programs, with and without strictness analysis enabled. The goal of
-this paper is to provide detailed quantitative insight about the
-effectiveness of a simple strictness analyser, in the context of a
-state-of-the art compiler running serious application programs.
-}
-
-\reference{J Mattson}
-{Local speculative evaluation for distributed graph reduction}
-{\GlasgowNinetyThree{}}
-{
-Speculative evaluation attempts to increase parallelism by
-performing potentially useful computations before they are known to be
-necessary. Speculative computations may be coded explicitly in a
-program, or they may be scheduled implicitly by the reduction system
-as idle processors become available. A general approach to both kinds
-of speculation incurs a great deal of overhead which may outweigh the
-benefits of speculative evaluation for fine-grain speculative tasks.
-
-Suppose that implicit speculative computations are restricted to
-execution on the local processor, with the hope of performing
-potentially useful work while the local mandatory tasks are all
-blocked. This restriction greatly simplifies the problems of
-speculative task management, and it opens the door for fine-grain
-speculative tasks. More complex mechanisms for distributing
-and managing coarse-grain speculative tasks can later be added on top of
-the basic foundation provided for local speculative evaluation.
-}
-
-\reference{PM Sansom}
-{Time profiling a lazy functional compiler}
-{\GlasgowNinetyThree{}}
-{
-Recent years have seen the development of profiling tools for lazy
-functional language implementations. This paper presents the results
-of using a time profiler to profile the Glasgow Haskell compiler.
-}
-
-\reference{D King and J Launchbury}
-{Functional graph algorithms with depth-first search}
-{\GlasgowNinetyThree{}}
-{Performing a depth-first search of a graph is one of the fundamental
-approaches for solving a variety of graph algorithms. Implementing
-depth-first search efficiently in a pure functional language has only
-become possible with the advent of imperative functional programming.
-In this paper we mix the techniques of pure functional programming in
-the same cauldron as depth-first search, to yield a more lucid
-approach to viewing a variety of graph algorithms. This claim will be
-illustrated with several examples.}
-
-\reference{A Santos and SL Peyton Jones}
-{Tuning a compiler's allocation policy}
-{\GlasgowNinetyThree{}}
-{There are many different costs involved in the allocation of
-closures during the execution of functional programs. Even more so
-for closures that are not in normal form, as they have to be
-allocated and then possibly entered and updated. We compare several
-different policies for closure allocation, trying to minimise these
-costs. The issues of laziness and full laziness are discussed.
-}
-
-\reference{CV Hall}
-{A framework for optimising abstract data types}
-{\GlasgowNinetyThree{}}
-{Two trends have been developing in functional programming language
-research. First, compilers are supporting optimisations of data
-types, such as unboxed types and parallel bags. Second, functional
-programmers are increasingly writing code in a style that treats
-data types as if they were abstract. Abstract data types offer
-opportunities for optimisation because the representation of the
-type can be optimised without affecting the program, allowing the
-programmer to use operations on it and improve performance. At the
-same time, the original type is often required by some part of the
-program, and the programmer is left to figure out which to use
-where.
-
-This paper presents a general framework in which good functional
-style automatically supports the efficient implementation of data
-types. It has been implemented in the Glasgow Haskell compiler
-specifically to introduce an optimised list representation, and
-this has been shown to cut execution time in half on a Sun
-SPARCstation-1 for a substantial program. Recent tests show that
-it improves performance by more than a factor of 4 on the GRIP
-parallel processor for short tests; however, more experiments will
-be necessary before we can assert that this speedup holds in
-general.
-}
-\end{document}
-
-
-
-
-
diff --git a/ghc/docs/abstracts/abstracts94.tex b/ghc/docs/abstracts/abstracts94.tex
deleted file mode 100644
index 7ee257db60..0000000000
--- a/ghc/docs/abstracts/abstracts94.tex
+++ /dev/null
@@ -1,187 +0,0 @@
-\documentstyle[11pt,slpj,abstracts]{article}
-
-\begin{document}
-
-% ======================================================
-
-\title{Abstracts of GRIP/GRASP/AQUA-related papers and reports, 1994
-}
-
-\author{The AQUA team \\ Department of Computing Science \\
-University of Glasgow G12 8QQ
-}
-
-\maketitle
-
-\begin{abstract}
-We present a list of papers and reports related to the GRIP, GRASP and AQUA projects,
-covering {\em the design, compilation technology,
-and parallel implementations of functional programming languages, especially
-\Haskell{}}.
-
-Most of them can be obtained by FTP. Connect to {\tt ftp.dcs.glasgow.ac.uk},
-and look in {\tt pub/glasgow-fp/papers}, {\tt pub/glasgow-fp/drafts}, {\tt pub/glasgow-fp/tech\_reports},
-or {\tt pub/glasgow-fp/grasp-and-aqua-docs}.
-
-Another useful place to look is on the Functional Programming Group WWW page:
-{\tt ftp://ftp.dcs.glasgow.ac.uk/pub/glasgow-fp/glasgow-fp.html}.
-
-They can also be obtained by writing to
-Helen McNee, Department of Computing Science,
-University of Glasgow G12 8QQ, UK. Her electronic mail address is
-helen@dcs.glasgow.ac.uk.
-\end{abstract}
-
-\section{Published papers}
-
-\reference{J Launchbury and SL Peyton Jones}
-{State in Haskell}
-{To appear in Lisp and Symbolic Computation (50 pages)}
-{
-Some algorithms make critical internal use of updatable state, even
-though their external specification is purely functional. Based on
-earlier work on monads, we present a way of securely encapsulating
-stateful computations that manipulate multiple, named, mutable
-objects, in the context of a non-strict, purely-functional language.
-The security of the encapsulation is assured by the type system, using
-parametricity. The same framework is also used to handle input/output
-operations (state changes on the external world) and calls to C.
-
-FTP: {\tt pub/glasgow-fp/drafts/state-lasc.ps.Z}
-}
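-
-The interface later shipped with the Glasgow compiler makes this
-concrete; the short sketch below uses the names from the eventual
-{\tt Control.Monad.ST} and {\tt Data.STRef} modules rather than the
-paper's own {\tt newVar}/{\tt readVar}-style names.
-
-\begin{verbatim}
-import Control.Monad.ST (runST)
-import Data.STRef (newSTRef, readSTRef, modifySTRef)
-
--- Internally imperative, externally pure: runST's rank-2 type stops
--- the mutable state escaping.
-sumST :: [Int] -> Int
-sumST xs = runST (do
-  acc <- newSTRef 0
-  mapM_ (\x -> modifySTRef acc (+ x)) xs
-  readSTRef acc)
-
--- sumST [1..10] == 55
-\end{verbatim}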
-
-\reference{P Sansom and SL Peyton Jones}
-{Time and space profiling for non-strict higher-order functional languages}
-{To appear in POPL 95}
-{
-We present the first profiler for a compiled, non-strict, higher-order,
-purely functional language capable of measuring {\em time}
-as well as {\em space} usage. Our profiler is implemented
-in a production-quality optimising compiler for Haskell,
-has low overheads, and can successfully profile large applications.
-
-A unique feature of our approach is that we give a formal
-specification of the attribution of execution costs to cost centres.
-This specification enables us to discuss our design decisions in a
-precise framework. Since it is not obvious how to map this
-specification onto a particular implementation, we also present an
-implementation-oriented operational semantics, and prove it equivalent
-to the specification.
-}
-
-
-% pub/glasgow-fp/authors/Philip_Wadler/monads-for-fp.dvi
-
-\reference{Philip Wadler}
-{Monads for functional programming}
-{in M. Broy (editor),
-{\em Program Design Calculi}, proceedings of the International
-Summer School directed by F. L. Bauer, M. Broy, E. W. Dijkstra, D.
-Gries, and C. A. R. Hoare. Springer Verlag, NATO ASI series, Series
-F: Computer and System Sciences, Volume 118, 1994}
-{
-The use of monads to structure functional programs is
-described. Monads provide a convenient framework for simulating
-effects found in other languages, such as global state, exception
-handling, output, or non-determinism. Three case studies are looked at
-in detail: how monads ease the modification of a simple evaluator;
-how monads act as the basis of a datatype of arrays subject to in-place
-update; and how monads can be used to build parsers.
-}
-
-% pub/glasgow-fp/authors/Philip_Wadler/taste-of-linear-logic.dvi
-\reference{Philip Wadler}
-{A taste of linear logic}
-{{\em Mathematical Foundations of Computer Science},
-Gdansk, Poland, August 1993, Springer Verlag, LNCS 711}
-{This tutorial paper provides an introduction to intuitionistic logic
-and linear logic, and shows how they correspond to type systems for
-functional languages via the notion of `Propositions as Types'. The
-presentation of linear logic is simplified by basing it on the Logic
-of Unity. An application to the array update problem is briefly
-discussed.
-}
-
-% It's in
-% /local/grasp/docs/short-static-semantics/new-paper/kevins-latest-version
-
-\reference{Cordelia Hall, Kevin Hammond, Simon Peyton Jones and Philip Wadler}
-{Type classes in Haskell}
-{European Symposium on Programming, 1994}
-{
-This paper defines a set of type inference rules for resolving
-overloading introduced by type classes. Programs including type
-classes are transformed into ones which may be typed by the
-Hindley-Milner inference rules. In contrast to other work on type
-classes, the rules presented here relate directly to user programs.
-An innovative aspect of this work is the use of second-order lambda
-calculus to record type information in the program.
-}
-
-\reference{PL Wadler}
-{Monads and composable continuations}
-{Lisp and Symbolic Computation 7(1)}
-{Moggi's use of monads to factor semantics is used to model the
-composable continuations of Danvy and Filinski. This yields some
-insights into the type systems proposed by Murthy and by Danvy and
-Filinski. Interestingly, modelling some aspects of composable
-continuations requires a structure that is almost, but not quite, a
-monad.
-}
-
-\reference{J Launchbury and SL Peyton Jones}
-{Lazy Functional State Threads}
-{Programming Languages Design and Implementation, Orlando, June 1994}
-{
-Some algorithms make critical internal use of updatable state, even
-though their external specification is purely functional. Based on
-earlier work on monads, we present a way of securely encapsulating
-such stateful computations, in the context of a non-strict,
-purely-functional language.
-
-There are two main new developments in this paper. First, we show how
-to use the type system to securely encapsulate stateful computations,
-including ones which manipulate multiple, named, mutable objects.
-Second, we give a formal semantics for our system.
-
-FTP: {\tt pub/glasgow-fp/papers/state.ps.Z}
-}
-
-\reference{K Hammond, JS Mattson Jr. and SL Peyton Jones}
-{Automatic spark strategies and granularity for a parallel functional language reducer}
-{CONPAR, Sept 1994}
-{
-This paper considers the issue of dynamic thread control in the context
-of a parallel Haskell implementation on the GRIP multiprocessor.
-For the first time we report the effect of our thread control strategies
-on thread granularity, as measured by dynamic heap allocation. This
-gives us a concrete means of measuring the effectiveness of these strategies,
-other than wall-clock timings which are notoriously uninformative.
-
-FTP: {\tt pub/glasgow-fp/papers/spark-strategies-and-granularity.ps.Z}
-}
-
-\reference{K Hammond}
-{Parallel Functional Programming: an Introduction}
-{PASCO '94, Sept. 1994 (Invited Paper)}
-{
-This paper introduces the general area of parallel functional
-programming, surveying the current state of research and suggesting
-areas which could profitably be explored in the future. No new
-results are presented. The paper contains 97 references selected from
-the 500 or so publications in this field.
-
-FTP: {\tt pub/glasgow-fp/papers/parallel-introduction.ps.Z}
-}
-
-% \section{Workshop papers and technical reports}
-
-% The 1994 Glasgow Functional Programming Workshop papers exist in
-% the draft proceedings at the moment. They are being refereed, and will
-% be published by Springer Verlag in due course.
-
-\end{document}
-
-
-
-
-
diff --git a/ghc/docs/abstracts/before90.tex b/ghc/docs/abstracts/before90.tex
deleted file mode 100644
index ae3d95d600..0000000000
--- a/ghc/docs/abstracts/before90.tex
+++ /dev/null
@@ -1,471 +0,0 @@
-\documentstyle[11pt,slpj]{article}
-
-\newcommand{\reference}[4]{ % authors, title, details, abstract
-\large
-#1, {\em #2}, #3.
-\normalsize
-\begin{quotation}
-#4
-\end{quotation}
-\vspace{0.2in}
-}
-
-\newcommand{\Haskell}[1]{{\sc Haskell}}
-
-\begin{document}
-
-\title{Abstracts of GRIP/GRASP-related papers and reports before 1990\\
-Dept of Computing Science \\
-University of Glasgow G12 8QQ}
-
-\author{
-Cordelia Hall (cvh@cs.glasgow.ac.uk) \and
-Kevin Hammond (kh@cs.glasgow.ac.uk) \and
-Will Partain (partain@cs.glasgow.ac.uk) \and
-Simon L Peyton Jones (simonpj@cs.glasgow.ac.uk) \and
-Phil Wadler (wadler@cs.glasgow.ac.uk)
-}
-
-\maketitle
-
-\begin{abstract}
-We present a list of papers and reports related to the GRIP
-and GRASP projects,
-covering {\em the design, compilation technology,
-and parallel implementations of functional programming languages, especially
-\Haskell{}}.
-
-Most of them can be obtained by writing to
-Teresa Currie, Dept of Computing Science,
-University of Glasgow G12 8QQ, UK. Her electronic mail address is
-teresa@uk.ac.glasgow.cs.
-
-Those marked ($\spadesuit$) can be obtained from the School of Information
-Systems, University of East Anglia, Norwich, UK.
-\end{abstract}
-
-\section{Published papers}
-%Nov
-\reference{Cordelia Hall and David Wise}
-{Generating Function Versions with Rational Strictness Patterns}
-{Science of Computer Programming 12 (1989)}
-{Expression evaluation in lazy applicative languages is usually implemented
-by an expensive mechanism requiring time and space which may be wasted
-if the expression eventually needs the values anyway. Strictness analysis,
-which has been successfully applied to flat domains and higher order functions,
-is used here to annotate programs in a first order language containing
-lazy list constructors so that they retain their original behavior, but
-run more efficiently. In practice, the strictness in fields within these
-constructors often follows regular patterns that can be finitely
-represented, especially in programs that manipulate such useful structures
-as finite or infinite trees. The approach presented here typically generates
-efficient, mutually recursive function versions for these programs.
-Weak and strong safety are defined and discussed, and the compiler
-is shown to be weakly safe. Termination is guaranteed by several factors,
-including a finite resource which controls the increase in code size,
-and a regularity constraint placed upon the strictness patterns
-propagated during compilation.}
-
-\reference{Kevin Hammond}
-{Exception Handling in a Parallel Functional Language: PSML}
-{Proc TENCON '89, Bombay, India, Nov 1989}
-{
-Handling exception occurrences during computation is a problem in most
-functional programming languages, even when the computation is eager and
-sequential. This paper presents a version of the error value method
-which allows lazy computation with deterministic semantics for parallel
-evaluation even in the presence of errors. The realisation of this
-technique is illustrated by reference to PSML, a referentially
-transparent variant of Standard ML designed for parallel evaluation.
-}
-
-\reference
-{Phil Trinder and Philip Wadler}
-{Improving list comprehension database queries}
-{{\em TENCON '89\/} (IEEE Region 10 Conference),
-Bombay, India, November 1989.}
-{
-The task of increasing the efficiency of database queries has received
-considerable attention. In this paper we describe the improvement of
-queries expressed as list comprehensions in a lazy functional
-language. The database literature identifies four algebraic and two
-implementation-based improvement strategies. For each strategy we show
-an equivalent improvement for queries expressed as list
-comprehensions. This means that well-developed database algorithms
-that improve queries using several of these strategies can be emulated
-to improve comprehension queries. We are also able to improve queries
-which require greater power than that provided by the relational
-algebra. Most of the improvements entail transforming a simple,
-inefficient query into a more complex, but more efficient form. We
-illustrate each improvement using examples drawn from the database
-literature.
-}
-
-%Sept
-
-
-\reference{Simon L Peyton Jones and Jon Salkild}
-{The Spineless Tagless G-machine}
-{Proc IFIP Symposium on Functional Programming Languages and Computer
-Architecture, London, Sept 1989}
-{
-The Spineless Tagless G-machine is an abstract machine based on graph
-reduction, designed as a target for compilers for non-strict functional
-languages.
-As its name implies, it is a development of earlier work, especially
-the G-machine and Tim.
-
-It has a number of unusual features: the abstract machine code is
-rather higher-level than is common, allowing better code generation;
-the representation of the graph eliminates most interpretive overheads;
-vectored returns from data structures give fast case-analysis;
-and the machine is readily extended for a parallel implementation.
-
-The resulting implementation runs at least 75\% faster
-than the Chalmers G-machine.
-}
-
-\reference
-{Philip Wadler}
-{Theorems for free!}
-{{\em 4'th International Conference on Functional Programming
-Languages and Computer Architecture}, London, September 1989.}
-{
-From the type of a polymorphic function we can derive a theorem
-that it satisfies. Every function of the same type satisfies the same
-theorem. This provides a free source of useful theorems,
-courtesy of Reynolds' abstraction theorem for the polymorphic lambda
-calculus.
-}
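-
-As one concrete instance of the kind of theorem this gives (the
-standard example, stated informally and ignoring strictness side
-conditions): any function $r$ of type
-$\forall \alpha . \; [\alpha] \rightarrow [\alpha]$ can only rearrange,
-duplicate or drop its input elements, so for every function $f$
-\[
-   {\it map}\,f \; \circ \; r \;\; = \;\; r \; \circ \; {\it map}\,f .
-\]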
-
-%Aug
-
-\reference{Kevin Hammond}
-{Implementing Type Classes for Haskell}
-{Proc Glasgow Workshop on Functional Programming, Fraserburgh, Aug 1989}
-{
-This paper describes the implementation of the type class mechanism for
-the functional language Haskell, which has been undertaken at Glasgow
-University. A simple introduction to type classes discusses the methods
-used to select operators and dictionaries in the Glasgow Haskell
-compiler. A solution to the problem of selecting super-class
-dictionaries, not considered by the original paper on type classes, is
-also presented. The modifications which must be made to the standard
-Hindley/Milner type-checking algorithm to permit the translation of
-operators are described, and a revised definition of algorithm W is
-provided. Finally, a set of performance figures compares the run-time
-efficiency of Haskell and LML programs, indicating the overhead inherent
-in the original, naive method of operator selection, and the improvement
-which may be obtained through simple optimisations.
-}
-
-\reference{Simon L Peyton Jones}
-{FLIC - a functional language intermediate code}
-{SIGPLAN Notices 23(8) 1988, revised 1989}
-{
-FLIC is a Functional Language Intermediate Code, intended to
-provide a common intermediate language between diverse
-implementations of functional languages, including parallel
-ones.
-This paper gives a formal definition of FLIC's syntax and
-semantics, in the hope that its existence may encourage greater
-exchange of programs and benchmarks between research groups.
-}
-
-%July
-\reference{Simon L Peyton Jones, Chris Clack and Jon Salkild}
-{High-performance parallel graph reduction}
-{Proc Parallel Architectures and Languages Europe (PARLE), LNCS 365, pp193-207,
-July 1989}
-{
-Parallel graph reduction is an attractive implementation for functional
-programming languages because of its simplicity and inherently distributed
-nature.
-This paper outlines some of the issues raised by parallel compiled
-graph reduction, and presents the solutions we have adopted to produce an
-efficient implementation.
-
-We concentrate on two particular issues:
-the efficient control of parallelism, resulting in an ability to alter
-the granularity of parallelism
-{\em dynamically};
-and the efficient use of the memory hierachy to improve locality.
-}
-%April
-
-\reference{Simon L Peyton Jones}
-{Parallel implementations of functional programming languages}
-{Computer Journal 32(2), pp175-186, April 1989}
-{
-It is now very nearly as easy to build a parallel computer
-as to build a sequential one, and there are strong incentives to do so:
-parallelism seems to offer the opportunity to improve both the
-absolute performance level and the cost/performance ratio of our machines.
-
-One of the most attractive features of functional programming languages
-is their suitability for programming such parallel computers.
-This paper is devoted to a discussion of this claim.
-
-First of all, we discuss parallel functional programming
-from the programmer's point of view.
-Since most parallel functional language implementations are based on graph reduction,
-we proceed to a discussion of some implementation issues raised by parallel
-graph reduction.
-The paper concludes with a case study of a particular parallel graph reduction
-machine, GRIP, and a brief survey of other similar machines.
-}
-%Jan
-\reference
-{Philip Wadler and Stephen Blott}
-{How to make {\em ad-hoc\/} polymorphism less {\em ad hoc}}
-{{\em 16'th ACM Symposium on Principles of Programming Languages},
-Austin, Texas, January 1989.}
-{
-This paper presents {\em type classes}, a new approach to {\em
-ad-hoc\/} polymorphism. Type classes permit overloading of arithmetic
-operators such as multiplication, and generalise the ``eqtype variables''
-of Standard ML.
-Type classes extend the Hindley-Milner polymorphic type system, and
-provide a new approach to issues that arise in object-oriented
-programming, bounded type quantification, and abstract data types.
-This paper provides an informal introduction to type classes, and
-defines them formally by means of type inference rules.
-}
-%88
-
-\reference{Chris Hankin, Geoffrey Burn, and Simon L Peyton Jones}
-{A safe approach to parallel combinator reduction}
-{Theoretical Computer Science 56, pp17-36, North Holland, 1988}
-{
-In this paper we present the results of two pieces of work which, when
-combined, allow us to take a program text of a functional language and
-produce a parallel implementation of that program.
-We present the techniques for discovering sources of parallelism in
-a program at compile time, and then show how this parallelism is
-naturally mapped onto a parallel combinator set that we will define.
-
-To discover sources of parallelism in a program, we use
-{\em abstract interpretation} a compile-time technique which is used
-to gain information about a program which may then be used to optimise
-the program's execution.
-A particular use of abstract interpretation is in
-{\em strictness analysis}
-of functional programs.
-In a language that has lazy semantics, the main potential for parallelism
-arises in the evaluation of arguments of strict operators.
-
-Having identified the sources of parallelism at compile time, it is
-necessary to communicate these to the run-time system.
-In the second part of the paper we introduce an extended set of combinators,
-including some parallel combinators, to achieve this purpose.
-}
-
-
-\reference{John T. O'Donnell and Cordelia Hall}
-{Debugging in Applicative Languages}
-{Lisp and Symbolic Computation, 1988}
-{Applicative programming languages have several properties that appear
-to make debugging difficult. First, the absence of assignment
-statements complicates the notion of changing a program while
-debugging. Second, the absence of imperative input and output
-makes it harder to obtain information about what the program is doing.
-Third, the presence of lazy evaluation prevents the user from
-knowing much about the order in which events occur. Some solutions to
-these problems involve nonapplicative extensions to the language.
-Fortunately, the same features of applicative languages that cause
-problems for traditional debugging also support an idiomatic
-applicative style of programming, and effective debugging techniques
-can be implemented using that style. This paper shows how to implement
-tracing and interactive debugging tools in a purely applicative
-style. This approach is more flexible, extensible and portable
-than debugging tools that require modification to the language
-implementation.}
-
-\reference{Simon L Peyton Jones, Chris Clack, Jon Salkild, Mark Hardie}
-{Functional programming on the GRIP multiprocessor}
-{Proc IEE Seminar on Digital Parallel Processors, Lisbon, Portugal, 1988}
-{
-Most MIMD computer architectures can be classified as
-tightly-coupled or loosely-coupled,
-depending on the relative latencies seen by a processor accessing different
-parts of its address space.
-
-By adding microprogrammable functionality to the memory units, we have
-developed a MIMD computer architecture which explores the middle region
-of this spectrum.
-This has resulted in an unusual and flexible bus-based multiprocessor,
-which we are using as a base for our research in parallel functional programming
-languages.
-
-In this paper we introduce parallel functional programming, and describe
-the architecture of the GRIP multiprocessor.
-}
-
-\reference{Geoffrey Burn, Simon L Peyton Jones, and John Robson}
-{The spineless G-machine}
-{Proc ACM Conference on Lisp and Functional Programming, Snowbird, pp244-258,
-August 1988}
-{
-Recent developments in functional language implementations have
-resulted in the G-machine, a programmed graph-reduction machine.
-Taking this as a basis, we introduce an optimised method of
-performing graph reduction, which does not need to build the
-spine of the expression being reduced.
-This Spineless G-machine only updates shared expressions, and
-then only when they have been reduced to weak head normal form.
-It is thus more efficient than the standard method of performing
-graph reduction.
-
-We begin by outlining the philosophy and key features of the
-Spineless G-machine, and comparing it with the standard
-G-machine.
-Simulation results for the two machines are then presented and
-discussed.
-
-The Spineless G-machine is also compared with Tim, giving a
-series of transformations by which they can be interconverted.
-These open up a wide design space for abstract graph reduction
-machines, which was previously unknown.
-
-A full specification of the machine is given in the appendix,
-together with compilation rules for a simple functional language.
-}
-%87
-
-\reference{Simon L Peyton Jones and Chris Clack}
-{Finding fixpoints in abstract interpretation}
-{in Abstract Interpretation of Declarative Languages,
-ed Hankin \& Abramsky, Ellis Horwood, pp246-265, 1987.}
-{
-Abstract interpretation is normally used as the basis for
-a static, compile-time analysis of a program.
-For example, strictness analysis attempts to establish which
-functions in the program are strict (we will use strictness
-analysis as a running example).
-
-Using abstract interpretation in this way requires the
-compile-time evaluation of expressions in the abstract domain.
-It is obviously desirable that this evaluation should
-always terminate, since otherwise the compiler would risk
-non-termination.
-In the case of non-recursive functions there is no problem, and
-termination is guaranteed.
-Recursive functions, however, present more of a problem, and it
-is the purpose of this paper to explain the problem and to
-offer some practical solutions.
-}
-
-\reference{Simon L Peyton Jones}
-{GRIP - a parallel processor for functional languages}
-{Electronics and Power, pp633-636, Oct 1987;
-also in ICL Technical Journal 5(3), May 1987}
-{
-A brief 4-page article about the GRIP architecture.
-}
-
-\reference{Simon L Peyton Jones, Chris Clack, Jon Salkild, and Mark Hardie}
-{GRIP - a high-performance architecture for parallel graph reduction}
-{Proc IFIP conference on Functional Programming Languages and
-Computer Architecture, Portland,
-ed Kahn, Springer Verlag LNCS 274, pp98-112, Sept 1987}
-{
-GRIP is a high-performance parallel machine designed to execute
-functional programs using supercombinator graph reduction.
-It uses a high-bandwidth bus to provide access to a
-large, distributed shared memory, using intelligent memory units and
-packet-switching protocols to increase the number of processors
-which the bus can support.
-GRIP is also being programmed to support parallel Prolog and
-DACTL.
-
-We outline GRIP's architecture and firmware, discuss the major design
-issues, and describe the current state of the project and
-our plans for the future.
-}
-%86
-\reference{Chris Clack and Simon L Peyton Jones}
-{The four-stroke reduction engine}
-{Proc ACM Conference on Lisp and Functional Programming,
-Boston, pp220-232, Aug 1986}
-{
-Functional languages are widely claimed to be amenable to concurrent
-execution by multiple processors. This paper presents an algorithm for
-the parallel graph reduction of a functional program.
-The algorithm supports transparent management of parallel
-tasks with no explicit
-communication between processors.
-}
-
-\reference{Simon L Peyton Jones}
-{Functional programming languages as a software engineering tool}
-{in Software Engineering - the critical decade D Ince,
-Peter Peregrinus, pp124-151, 1986}
-{
-It is the purpose of this paper to suggest that functional
-languages are an appropriate tool for supporting the activity
-of programming in the large, and to present a justification of
-this claim.
-}
-
-\reference{Simon L Peyton Jones}
-{Using Futurebus in a fifth generation computer architecture}
-{Microprocessors and Microsystems 10(2), March 1986}
-{
-Despite the bandwidth limitations of a bus, we present a design
-for a parallel computer (GRIP) based on Futurebus, which limits bus
-bandwidth requirements by using intelligent memories.
-
-Such a machine offers higher performance than a uniprocessor
-and lower cost than a more extensible multiprocessor, as well
-as serving as a vehicle for research in parallel architectures.
-}
-
-\section{Internal reports}
-
-
-\reference{Kevin Hammond}
-{A Proposal for an Implementation of Full Dactl on a Meiko Transputer Rack}
-{SYS-C89-02, University of East Anglia, 1989}
-{
-The design of an abstract machine instruction set for Dactl is
-described. The instruction set is sufficient to encapsulate all Dactl
-constructs; it will also permit parallel execution where applicable.
-The paper considers the difficulties involved in the implementation of
-this abstract instruction set on the UEA Meiko M40 transputer rack,
-using a ZAPP-style kernel. Part of the code for a simulation of this
-instruction set is included as an appendix to the report.
-}
-
-
-\reference{Kevin Hammond and John Glauert}
-{Implementing Pattern-Matching Functional Languages using Dactl}
-{University of Glasgow, 1989}
-{
-This paper describes the implementation of a family of pattern-matching
-functional languages in the parallel graph-rewriting language Dactl.
-Attention is focussed on the direct implementation of the
-pattern-matching constructs in the context of various reduction
-strategies: eager, lazy, and lazy with strictness analysis. Two new
-reduction strategies combining lazy evaluation with a technique for
-compiling non-overlapping patterns are also illustrated. The latter
-strategies provide improved termination properties compared with
-conventional functional language implementations for non-overlapping
-patterns. The implementations described here cover all pattern-matching
-constructs found in Standard ML, including named patterns and deep
-patterns. The use of Dactl renders explicit the complexities of
-pattern-matching which are obscured by implementation in a conventional
-intermediate language or abstract machine.
-}
-
-\reference{Simon L Peyton Jones}
-{A practical technique for designing asynchronous finite-state machines}
-{Proc Glasgow Workshop on Functional Programming, Fraserburgh, Aug 1989}
-{
-The literature on asynchronous logic design is mostly of a fairly theoretical
-nature. We present a practical technique for generating asynchronous finite-state
-machines from a description of their states and transitions. The technique
-has been used successfully to design a number of state machines in
-the GRIP multiprocessor.
-}
-
-\end{document}
diff --git a/ghc/docs/abstracts/reports.tex b/ghc/docs/abstracts/reports.tex
deleted file mode 100644
index fc8a332900..0000000000
--- a/ghc/docs/abstracts/reports.tex
+++ /dev/null
@@ -1,111 +0,0 @@
-\documentstyle[11pt,slpj]{article}
-
-\newcommand{\reference}[4]{ % authors, title, details, abstract
-\large
-#1, {\em #2}, #3.
-\normalsize
-\begin{quotation}
-#4
-\end{quotation}
-\vspace{0.2in}
-}
-
-\newcommand{\Haskell}[1]{{\sc Haskell}}
-
-\begin{document}
-
-\title{Abstracts of GRIP/GRASP-related design documents and manuals \\
-Dept of Computing Science \\
-University of Glasgow G12 8QQ}
-
-\author{
-Cordelia Hall (cvh@cs.glasgow.ac.uk) \and
-Kevin Hammond (kh@cs.glasgow.ac.uk) \and
-Will Partain (partain@cs.glasgow.ac.uk) \and
-Simon L Peyton Jones (simonpj@cs.glasgow.ac.uk) \and
-Phil Wadler (wadler@cs.glasgow.ac.uk)
-}
-
-\maketitle
-
-\begin{abstract}
-This list covers internal design documents and manuals for the GRIP
-and GRASP projects.
-They are mainly intended for internal consumption, or for brave friends.
-
-Reports and papers designed for more general consumption are given in
-a separate list.
-
-Any of them can be obtained by writing to
-Teresa Currie, Dept of Computing Science,
-University of Glasgow G12 8QQ, UK. Her electronic mail address is
-teresa@uk.ac.glasgow.cs.
-\end{abstract}
-
-
-\section{Manuals, design documents and guides}
-
-\reference{Kevin Hammond and Simon L Peyton Jones}
-{Mail server guide}
-{Nov 1990}
-{
-A guide to the GRIP Mail Server
-}
-
-\reference{Kevin Hammond, Simon L Peyton Jones and Jon Salkild}
-{GLOS 2.0 - The GRIP Lightweight Operating System}
-{University College London, January 1989}
-{
-GLOS is a lightweight, multitasking, non-preemptive operating system
-for the GRIP multiprocessor.
-This paper describes the operating system from the programmer's point of
-view.
-}
-
-\reference{Simon L Peyton Jones and Jon Salkild}
-{GRIP system user manual}
-{University College London, January 1989}
-{
-This document describes how to configure, boot and run the GRIP system,
-using the sys2 system management program.
-}
-
-\reference{Simon L Peyton Jones}
-{The BIP front panel user manual}
-{University College London, January 1989}
-{
-This document describes {bsim}, the program which runs on the GRIP host
-Unix machine, and provides a front-panel interface to the BIP.
-It assumes familiarity with the BIP architecture.
-}
-
-\reference{Chris Clack}
-{The GRIP Intelligent Memory Unit microprogrammer's guide}
-{University College London, January 1989}
-{
-This paper encapsulates the spectrum of knowledge required to microprogram
-the GRIP Intelligent Memory Units (IMUs). It gives a detailed
-description of the IMU hardware and its microassembler, together with
-the library of predefined microcode macros.
-An overview of the Bus Interface
-Processor (BIP) hardware and its interface protocols is also provided.
-}
-
-\reference{Chris Clack}
-{Diagnostic control and simulation of GRIP Intelligent Memory Units - the
-msH user guide}
-{University College London, January 1989}
-{
-Software has been written to facilitate interaction with the diagnostic
-hardware embedded in each GRIP Intelligent Memory Unit (IMU).
-The msS program precisely emulates an IMU, and can be used to help
-debug IMU microcode in the absence of real hardware.
-The msH program interfaces directly to the actual hardware.
-Both msS and msH are driven by the same interactive front panel, which
-both acts as a command interpreter and manages the display screen.
-
-The paper is mainly concerned with a description of the front-panel and
-how to use it, but also gives a brief overview of the IMU architecture.
-}
-
-\end{document}
diff --git a/ghc/docs/abstracts/slpj.sty b/ghc/docs/abstracts/slpj.sty
deleted file mode 100644
index 9027eabbd4..0000000000
--- a/ghc/docs/abstracts/slpj.sty
+++ /dev/null
@@ -1,41 +0,0 @@
-% Style file for Simon's documents
-
-\batchmode
-\sloppy
-
-%****************************************************************
-%* *
-%* Page and paragraph format *
-%* *
-%****************************************************************
-
-% Margins and page layout
-
-\input{a4wide.sty}
-
-%\setlength{\topmargin}{-1cm}
-%\setlength{\oddsidemargin}{-0.5cm}
-%\setlength{\evensidemargin}{-0.5cm}
-%\setlength{\headheight}{0cm}
-%\setlength{\headsep}{0cm}
-%\setlength{\textwidth}{17cm}
-%\setlength{\textheight}{23.5cm}
-
-\setlength{\marginparwidth}{1.5cm}
-
-% Block paragraphs
-
-\setlength{\parskip}{0.25cm}
-\setlength{\parsep}{0.25cm}
-\setlength{\topsep}{0cm} % Reduces space before and after verbatim,
- % which is implemented using trivlist
-\setlength{\parindent}{0cm}
-
-\renewcommand{\textfraction}{0.2}
-\renewcommand{\floatpagefraction}{0.7}
-
-
-\input{useful.sty}
-
-
-
diff --git a/ghc/docs/abstracts/useful.sty b/ghc/docs/abstracts/useful.sty
deleted file mode 100644
index bc901a359a..0000000000
--- a/ghc/docs/abstracts/useful.sty
+++ /dev/null
@@ -1,186 +0,0 @@
-%****************************************************************
-%* *
-%* GENERALLY USEFUL MACROS *
-%* *
-%****************************************************************
-
-
-%****************************************************************
-%* *
-%* Some standard abbreviations *
-%* *
-%****************************************************************
-
-% Haskell name
-\newcommand{\Haskell}[1]{Haskell}
-
-% \ba \ea: Abbreviations for begin and end array
-%
-\newcommand{\ba}{\begin{array}}
-\newcommand{\ea}{\end{array}}
-
-
-%****************************************************************
-%* *
-%* Keeping text together *
-%* *
-%****************************************************************
-
-% Use \begin{together} (or \bt)
-% \end{together} (or \et)
-%
-% to keep a paragraph together on a single page.
-
-\newenvironment{together}%
- {\begin{flushleft}\begin{minipage}{\textwidth}}%
- {\end{minipage}\end{flushleft}}
-
-\newcommand{\bt}{\begin{together}}
-\newcommand{\et}{\end{together}}
-
-
-%****************************************************************
-%* *
-%* ToDo macro (cf showtodo.sty) *
-%* *
-%****************************************************************
-
-\newcommand{\ToDo}[1]{}
-
-
-%****************************************************************
-%* *
-%* Making boxes round things *
-%* *
-%****************************************************************
-
-% \outline{text} typesets text in a centred framed box of the width
-% of the page.
-%
-\newcommand{\outline}[1]{
- \begin{center}
- \fbox{
- \begin{minipage}{\linewidth}
- #1
- \end{minipage}
- }
- \end{center}
-}
-
-%****************************************************************
-%* *
-%* Math codes *
-%* *
-%****************************************************************
-
-% The mathcodes for the letters A, ..., Z, a, ..., z are changed to
-% generate text italic rather than math italic by default. This makes
-% multi-letter identifiers look better. The mathcode for character c
-% is set to "7000 (variable family) + "400 (text italic) + c.
-%
-
-
-% Old Latex
-%
-%\def\@setmcodes#1#2#3{{\count0=#1 \count1=#3
-% \loop \global\mathcode\count0=\count1 \ifnum \count0<#2
-% \advance\count0 by1 \advance\count1 by1 \repeat}}
-%
-%\@setmcodes{`A}{`Z}{"7441}
-%\@setmcodes{`a}{`z}{"7461}
-
-
-% Should work with Latex 3.0
-%
-%{\catcode`\= 11
-% \gdef\mathrm{\use@mathgroup \=cmr \z@}
-% %\gdef\mit{\use@mathgroup \=cmm \@ne}
-% \gdef\mit{\use@mathgroup \=cmt \@ne}
-% \gdef\cal{\use@mathgroup \=cmsy \tw@}
-% }
-
-\@ifundefined{selectfont}
- {}
- {\newmathalphabet{\textit}
- \addtoversion{normal}{\textit}{cmr}{m}{it}
- \addtoversion{bold}{\textit}{cmr}{bx}{it}
- \everymath{\textit}
- \everydisplay{\textit}
- }
-
-%****************************************************************
-%* *
-%* Definitions for macros used in figures *
-%* These are produced by fig2dev, so we need defns for them *
-%* *
-%****************************************************************
-
-% These ones work for 11-pt typesetting
-
-\@ifundefined{selectfont} %DL is MS scheme present?
-{}{
-\def\fiverm{\rm\tiny} % Five pt
-\def\sevrm{\rm\scriptsize} % Seven pt
-
-\def\nintt{\tt\footnotesize}
-\def\ninrm{\rm\footnotesize}
-
-\def\tenrm{\rm\small} % Ten pt
-\def\tentt{\tt\small} % Ten pt
-
-\def\elvrm{\rm\normalsize} % Eleven pt
-\def\elvit{\em\normalsize}
-
-\def\twlbf{\bf\large} % Twelve pt
-\def\twlit{\em\large}
-\def\twltt{\tt\large}
-\def\twlrm{\rm\large}
-\def\twfvtt{\tt\large}
-
-\def\frtnrm{\rm\Large} % Fourteen pt
-\def\frtnbf{\bf\Large}
-\def\frtnit{\em\Large}
-\def\frtntt{\tt\Large}
-
-\def\svtnsf{\sf\huge} % Seventeen pt
-
-
-% can't remember why I need these
-\def\egt{\size{8}{9} }
-\def\elv{\size{11}{12} }
-\def\five{\size{5}{7} }
-\def\fiv{\size{5}{6} }
-\def\frtn{\size{14}{15} }
-\def\nin{\size{9}{10} }
-\def\sev{\size{7}{8} }
-\def\six{\size{6}{7} }
-\def\svtn{\size{17}{18} }
-\def\ten{\size{10}{11} }
-\def\twfv{\size{25}{27} }
-\def\twl{\size{12}{14} }
-\def\twty{\size{20}{22} }
-}
-
-%****************************************************************
-%* *
-%* Useful symbols *
-%* *
-%****************************************************************
-
-
-% Semantic brackets
-%
-% \leftsembrac [[ left semantic bracket
-% \rightsembrac ]] right semantic bracket
-% \sembrac{x} [[x]] enclose arg in semantic brackets
-% \semfun{E}{x} E[[x]] make E curly
-%
-\newcommand{\leftsembrac}{[\![}
-\newcommand{\rightsembrac}{]\!]}
-\newcommand{\sembrac}[1]{\leftsembrac#1\rightsembrac}
-\newcommand{\semfun}[2]{{\cal #1}\db{#2}\,}
-
-% \plusplus ++ run together
-%
-\def\plusplus{\mathrel{+\!\!\!+}}
-
diff --git a/ghc/docs/add_to_compiler/Jmakefile b/ghc/docs/add_to_compiler/Jmakefile
deleted file mode 100644
index ec85333bc6..0000000000
--- a/ghc/docs/add_to_compiler/Jmakefile
+++ /dev/null
@@ -1,22 +0,0 @@
-/* this is a standalone Jmakefile; NOT part of ghc "make world" */
-
-DocProcessingSuffixRules()
-
-SRCS_VERB = \
- paper.verb \
- state-of-play.verb \
- overview.verb \
- overview-fig.verb \
- front-end.verb \
- back-end.verb \
- core-syntax.verb \
- core-summary-fig.verb \
- stg-summary-fig.verb \
- howto-add.verb
-SRCS_TEX = $(SRCS_VERB:.verb=.tex)
-
-docs:: paper.dvi
-
-paper.dvi: $(SRCS_TEX)
-
-ExtraStuffToClean( $(SRCS_TEX) )
diff --git a/ghc/docs/add_to_compiler/back-end.verb b/ghc/docs/add_to_compiler/back-end.verb
deleted file mode 100644
index 4af96e6acd..0000000000
--- a/ghc/docs/add_to_compiler/back-end.verb
+++ /dev/null
@@ -1,41 +0,0 @@
-%************************************************************************
-%* *
-\subsection{The back end of the compiler}
-\label{sec:back-end}
-%* *
-%************************************************************************
-
-The back end of the compiler begins once the typechecker's
-output has been desugared into the so-called Core syntax. Core syntax
-is discussed in Section~\ref{sec:core-syntax}.
-
-We intend the back end to be a sequence of highly effective
-CoreSyntax-to-CoreSyntax and StgSyntax-to-StgSyntax transformation
-passes, making it possible for the
-CoreSyntax$\Rightarrow$StgSyntax$\Rightarrow$Abstract~C (and on to
-machine code) denouement to produce really good code.
-
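-To give a flavour of what ``a Core-to-Core pass'' means in practice,
-here is a deliberately tiny, self-contained sketch.  The @Expr@ type
-below is a toy stand-in, {\em not} the compiler's real @CoreExpr@,
-and the pass just beta-reduces visible redexes; a real pass would be
-written against the Core datatypes described in
-Section~\ref{sec:core-syntax}.
-\begin{verbatim}
--- Toy expression type standing in for Core syntax (illustration only).
-data Expr = Var String | Lit Int | App Expr Expr | Lam String Expr
-
--- A miniature "pass": rewrite immediate applications of a lambda.
--- (Naive substitution: it ignores variable capture, which a real
--- pass must of course handle.)
-betaReduce :: Expr -> Expr
-betaReduce (App (Lam x body) arg) = subst x (betaReduce arg) (betaReduce body)
-betaReduce (App f a)              = App (betaReduce f) (betaReduce a)
-betaReduce (Lam x body)           = Lam x (betaReduce body)
-betaReduce e                      = e
-
-subst :: String -> Expr -> Expr -> Expr
-subst x new (Var y) | x == y    = new
-                    | otherwise = Var y
-subst _ _   (Lit n)             = Lit n
-subst x new (App f a)           = App (subst x new f) (subst x new a)
-subst x new (Lam y body)
-  | x == y    = Lam y body              -- binder shadows x: stop here
-  | otherwise = Lam y (subst x new body)
-\end{verbatim}
-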
-{\em It is with these transformation passes that we are hoping for
-your enthusiastic help!} There are also some examples in the
-GHC distribution, written by people other than the original compiler
-authors---so it can be done...
-
-We already have a pretty good {\em simplifier}\srcloc{simplCore/} to
-do local transformations, written mainly by Andr\'e Santos. Among
-other things, it unfolds basic arithmetic operations and constants,
-exposing the underlying unboxed values. Those interested in the
-merits of these transformations should consult Peyton Jones and
-Launchbury's paper, ``Unboxed values as first class citizens in a
-non-strict functional language'' \cite{peyton-jones91b}.
-
-The reader interested in the final code-generation parts of the
-compiler, from Core syntax to STG syntax\srcloc{stgSyn/CoreToStg.lhs}
-to Abstract~C\srcloc{codeGen/}, should consult Peyton Jones's recent
-paper, ``Implementing lazy functional languages on stock hardware: the
-Spineless Tagless G-machine'' \cite{peyton-jones92a}.
-
-Further note: We have found that the STG
-syntax\srcloc{stgSyn/StgSyn.lhs} is the better medium for a few
-transformations\srcloc{stgSyn/SimplStg.lhs}. This is fine---STG syntax
-is just as manipulable a functional language as Core syntax, even if
-it's a bit messier.
diff --git a/ghc/docs/add_to_compiler/core-summary-fig.verb b/ghc/docs/add_to_compiler/core-summary-fig.verb
deleted file mode 100644
index 7e339ea8a0..0000000000
--- a/ghc/docs/add_to_compiler/core-summary-fig.verb
+++ /dev/null
@@ -1,45 +0,0 @@
-\begin{figure} \fbox{
-$\begin{array}{lrcll}
-%\\
-%\mbox{Program} & program & \rightarrow & binds & \\
-%\\
-\mbox{Bindings} & binds & \rightarrow & bind_1@;@ \ldots @;@~bind_n & n \geq 1 \\
- & bind & \rightarrow & @nonrec@~ var ~@=@~ expr \\
- && | & @rec@~ var_1 ~@=@~ expr_1 @;@ \ldots @;@~ var_n ~@=@~ expr_n & n \geq 1 \\
-\\
-\mbox{Expression} & expr
- & \rightarrow & expr_1 ~ expr_2 & \mbox{Application} \\
- && | & expr ~ type & \mbox{Type application} \\
- && | & @\@~ var~ @->@ ~ expr & \mbox{Lambda abstraction} \\
- && | & @/\@~ tyvar~ @->@ ~ expr & \mbox{Type abstraction} \\
- && | & @case@ ~expr~ @of@ ~ alts & \mbox{Case expression} \\
- && | & @let@~ bind ~@in@~ expr & \mbox{Local definition(s)} \\
- && | & con~expr_1 \ldots expr_n & \mbox{Saturated constructor} \\
- && | & prim~expr_1 \ldots expr_n & \mbox{Saturated primitive} \\
- && | & var & \mbox{Variable} \\
- && | & literal & \\
-\\
-\mbox{Alternatives} & alts & \rightarrow
- & calt_1@;@ \ldots @;@~calt_n@; default ->@~ expr
- & n \geq 0~\mbox{(Boxed)} \\
- && | & lalt_1@;@ \ldots @;@~lalt_n@;@~var ~@->@~ expr
- & n \geq 0~\mbox{(Unboxed)} \\
-\\
-\mbox{Constructor alt}
- & calt & \rightarrow & con~var_1 \ldots var_n~@->@~expr & n \geq 0 \\
-\mbox{Literal alt}
- & lalt & \rightarrow & literal~@->@~expr & \\
-\\
-\mbox{Literals} & literal
- & \rightarrow & integer & \mbox{machine-level numbers} \\
- && | & \ldots & \\
-\\
-\mbox{Primitives} & prim
- & \rightarrow & @+@ ~|~ @-@ ~|~ @*@ ~|~ @/@ & \mbox{machine-level ops} \\
- && | & \ldots & \\
-\\
-\end{array}$
-}
-\caption{Abstract syntax of the Core language}
-\label{fig:core-syntax}
-\end{figure}
diff --git a/ghc/docs/add_to_compiler/core-syntax.verb b/ghc/docs/add_to_compiler/core-syntax.verb
deleted file mode 100644
index a5b8d091cf..0000000000
--- a/ghc/docs/add_to_compiler/core-syntax.verb
+++ /dev/null
@@ -1,139 +0,0 @@
-%************************************************************************
-%* *
-\section{Core syntax, and transformations on it}
-\label{sec:core-syntax}
-%* *
-%************************************************************************
-
-The @CoreSyntax@ datatype is intended to be the {\em lingua franca} of
-the back end of the compiler; a summary is shown in
-Figure~\ref{fig:core-syntax}.
-\input{core-summary-fig}
-As you can see, the Core syntax is a simple
-functional language.
-
-\subsection{Second-order polymorphic lambda calculus}
-\label{sec:second-order}
-
-Core syntax is essentially the second-order polymorphic lambda
-calculus. This is reflected in the fact that Core expressions can be
-{\em type applications} or {\em type abstractions} (the types in
-question are represented as @UniTypes@, of course).\footnote{An
-interesting point: there are Core-syntax programs that cannot be
-written in Haskell! Core syntax
-is the {\em more expressive} language. One could imagine writing a
-front end (parser, etc.) for a richer programming language and still
-being able to use this compiler's back-end machinery without change.}
-
-Having explicit type application and abstraction (NB: added by
-the typechecker during translation) gives us a uniform,
-well-understood, non-{\em ad hoc} way to express the types of
-Core expressions. Given that variables (i.e., @Ids@) and other
-basic entities have their types memoised in them, it is then easy to
-work out the type of {\em any Core expression}. For example, in the
-expression\ToDo{example here}
-\begin{verbatim}
-... <example to be supplied> ...
-\end{verbatim}
-@a@ is a type variable, @(<???>)@ is a type application, and, assuming
-the type of @??@ is $some\ math\ mode\ here...$, then the type of the
-expression is @...@.
-
-The truly great thing about using the second-order polymorphic lambda
-calculus is that it is {\em easy to preserve types
-in the face of transformation passes}, however drastic their mangling
-of the original program.
-
-\ToDo{example here}
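-
-In the meantime, a schematic illustration (hand-written notation in
-the style of Figure~\ref{fig:core-syntax}, not the compiler's actual
-output) may help.  The polymorphic identity function, and an
-application of it, look like this in Core:
-\begin{verbatim}
-id = /\ a -> \ x -> x        -- type abstraction, then value abstraction
-
-... (id Int) 3 ...           -- type application, then value application
-\end{verbatim}
-Because the type application @(id Int)@ is explicit, the type of that
-subexpression---@Int -> Int@---can be read off directly, and any
-transformation that moves or duplicates the expression carries the
-type information along with it.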
-
-\subsection{Parameterised and annotated Core syntax}
-\label{sec:parameterised-core}
-
-As we saw with the ``abstract syntax'' (in
-Section~\ref{sec:AbsSyntax}), the Core syntax is also {\em
-parameterised}, this time with respect to binders and bound-variables
-(or ``bindees''). The definition of a Core expression
-begins\srcloc{coreSyn/CoreSyn.lhs}:
-\begin{mytightcode}
-data CoreExpr binder bindee
- = CoVar bindee
- | CoLit CoreLiteral
- ...
-type PlainCoreBinder = Id
-type PlainCoreBindee = Id
-type PlainCoreExpr = CoreExpr PlainCoreBinder PlainCoreBindee\end{mytightcode}
-Most back-end passes use the parameterisation shown above, namely
-@PlainCoreExprs@\srcloc{coreSyn/PlainCore.lhs}, parameterised on @Id@
-for both binders and bindees.
-
-An example of a pass that uses a different parameterisation is
-occurrence analysis\srcloc{simplCore/OccurAnal.lhs}, which gathers
-up info about the {\em occurrences} of bound variables. It uses:
-\begin{mytightcode}
-data BinderInfo {\dcd\rm-- various things to record about binders...}
-type TaggedBinder tag = (Id, tag)
-type TaggedCoreExpr tag = CoreExpr (TaggedBinder tag) Id
-
-substAnalyseExpr :: PlainCoreExpr -> TaggedCoreExpr BinderInfo\end{mytightcode}
-The pass's expression-mangling function then has the unsurprising type
-shown above.
-
-Core syntax has a ``twin'' datatype that is also sometimes useful:
-{\em annotated} Core syntax\srcloc{coreSyn/AnnCoreSyn.lhs}. This is a
-datatype identical in form to Core syntax, but such that every
-``node'' of a Core expression can be annotated with some information
-of your choice. As an example, the type of a pass that attaches a
-@Set@ of free variables to every subexpression in a Core expression
-might be\srcloc{coreSyn/FreeVars.lhs}:
-\begin{mytightcode}
-freeVars :: PlainCoreExpr -> AnnCoreExpr Id Id (Set Id)
- {\dcd\rm-- parameterised on binders, bindees, and annotation}\end{mytightcode}
-
-\subsection{Unboxing and other Core syntax details}
-\label{sec:unboxing}
-
-One facet of the Core syntax summary in Figure~\ref{fig:core-syntax}
-that may have caught your eye is the separation of case-alternatives
-into those for boxed entities (ordinary data constructors) and unboxed
-entities (real machine-level things). The ``literals'' and
-``primitives'' mentioned there are also machine-level constructs. It
-is for this reason that all applications of constructors and
-primitives are {\em saturated}; what use, for example, is
-a machine-level addition if you do not
-have two arguments to feed to it? (Most machines do not offer curried
-arithmetic in their hardware.)
-
-The handling of unboxed values in the back end of the compiler follows
-the design described in the Peyton Jones/Launchbury paper on the
-subject \cite{peyton-jones91b}. You, the enthusiastic optimiser, only
-need to be aware that this is the ``level of discourse.'' You will
-also probably want to be sure that your optimisations can take full
-advantage of the explicitness of the unboxery.
-
-\subsection{``Core Lint''---your dear friend}
-\label{sec:core-lint}
-
-ToDo ToDo
-
-% \subsection{STG syntax}
-% \label{sec:stg-syntax}
-%
-% As mentioned earlier, the compiler converts Core syntax into ``STG
-% syntax'' (named for the Spineless Tagless G-machine) before finally
-% making its move into the dark world we call ``Abstract~C''.
-%
-% Figure~\ref{fig:stg-syntax} shows the STG syntax,
-% \input{stg-summary-fig}
-% mainly so that you can compare it with Core syntax. (It is at least
-% conceivable that you might want to perform your optimisation pass at this
-% level.)
-%
-% STG syntax is a truly austere functional language. In places where
-% Core syntax allows "exprs", STG syntax insists on "vars"---everything
-% has been flattened out. Type information (abstractions and
-% applications) has been thrown overboard. Other than that, STG syntax
-% is the ``same'' as Core syntax, with some extra non-essential
-% annotations on bindings: update flags and free-variable information.
-%
-% You will want to consult the revised Spineless Tagless G-machine paper
-% \cite{peyton-jones92a} if you wish to spend any time in the STG world.
diff --git a/ghc/docs/add_to_compiler/front-end.verb b/ghc/docs/add_to_compiler/front-end.verb
deleted file mode 100644
index 5c3c41dd38..0000000000
--- a/ghc/docs/add_to_compiler/front-end.verb
+++ /dev/null
@@ -1,297 +0,0 @@
-%************************************************************************
-%* *
-\subsection{The front end of the compiler}
-\label{sec:front-end}
-%* *
-%************************************************************************
-
-The previous section covered the main points about the front end of
-the compiler: it is dominated by a ``renamer'' and a typechecker
-working directly at the Haskell source level. In this section, we
-will look at some basic datatypes used or introduced in the front
-end---ones that you may see as input to your optimisation pass.
-
-\subsubsection{``Abstract syntax'', a source-level datatype}
-\label{sec:AbsSyntax}
-
-As Figure~\ref{fig:overview} shows, the typechecker both reads and
-writes a collection of datatypes we call ``Abstract syntax.''
-This is misleading in that what
-goes into the typechecker is quite different from what comes out.
-
-Let's first consider this fragment of the abstract-syntax
-definition\srcloc{abstractSyn/HsExpr.lhs}, for Haskell explicit-list
-expressions (Haskell report, section~3.5
-\cite{hudak91a}):\nopagebreak[4]
-\begin{mytightcode}
-data Expr var pat =
- ...
- | ExplicitList [Expr var pat]
- | ExplicitListOut UniType [Expr var pat]
- ...
-
-type ProtoNameExpr = Expr ProtoName ProtoNamePat
-type RenamedExpr = Expr Name RenamedPat
-type TypecheckedExpr = Expr Id TypecheckedPat\end{mytightcode}
-an @ExplicitList@ appears only in typechecker input; an @ExplicitListOut@
-is the corresponding construct that appears
-only in the output, with the inferred type information attached.
-
-The fragment above also shows how abstract syntax is {\em parameterised}
-with respect to variables and patterns. The idea is the same for
-both; let's just consider variables.
-
-The renamer converts @ProtoNameExprs@ (expressions with
-@ProtoNames@\srcloc{basicTypes/ProtoName.lhs} as variables---little
-more than just strings) into @RenamedExprs@, which have all naming sorted out
-(using @Names@\srcloc{abstractSyn/Name.lhs}). A @Name@ is known to be
-properly bound, isn't duplicated, etc.; it is also known whether it is bound to a
-built-in standard-prelude entity.
-
-(The renamer also does dependency analysis, which is required to
-maximise polymorphism in a Hindley-Milner type system.)
-
-The typechecker reads the @RenamedExprs@, sorts out the types, and
-spits out @TypecheckedExprs@, with variables represented by
-@Ids@\srcloc{basicTypes/Id.lhs}. You can find out just about anything
-you want to know about a variable from its @Id@.
-
-To see what GHC makes of some Haskell, in a file @Foo.hs@, say:
-try @ghc -noC -ddump-rn4 Foo.hs@, to see what comes out of the renamer [pass~4];
-try @ghc -noC -ddump-tc Foo.hs@, to see what comes out of the typechecker.
-
-\subsubsection{Basic datatypes in the compiler}
-
-None of the internal datatypes in the example just given are
-particularly interesting except @Ids@\srcloc{basicTypes/Id.lhs}. A
-program variable, which enters the typechecker as a string, emerges as
-an @Id@.
-
-The important thing about @Id@---and the datatypes representing other
-basic program entities (type variables, type constructors, classes,
-etc.)---is that they often include {\em memoised} information that can
-be used throughout the rest of the compiler.
-
-Let us take a cursory look at @Ids@, as a representative example of
-these basic data types. (Don't be too scared---@Ids@ are the hairiest
-entities in the whole compiler!)
-Here we go:
-\begin{mytightcode}
-\codeallowbreaks{}data Id
- = Id Unique {\dcd\rm-- key for fast comparison}
- UniType {\dcd\rm-- Id's type; used all the time;}
- IdInfo {\dcd\rm-- non-essential info about this Id;}
- PragmaInfo {\dcd\rm-- user-specified pragmas about this Id;}
- IdDetails {\dcd\rm-- stuff about individual kinds of Ids.}\end{mytightcode}
-
-So, every @Id@ comes with:
-\begin{enumerate}
-\item
-A @Unique@\srcloc{basicTypes/Unique.lhs}, essentially a unique
-@Int@, for fast comparison;
-\item
-A @UniType@ (more on them below... section~\ref{sec:UniType}) giving the variable's
-type---this is the most useful memoised information.
-\item
-A @PragmaInfo@, which is pragmatic stuff the user specified for
-this @Id@; e.g., @INLINE@ it; GHC does not promise to honour these
-pragma requests, but this is where it keeps track of them.
-\item
-An @IdInfo@ (more on {\em them} below... section~\ref{sec:IdInfo}),
-which tells you all the extra things
-that the compiler doesn't {\em have} to know about an @Id@, but it's jolly nice...
-This corresponds pretty closely to the @GHC_PRAGMA@ cross-module info that you will
-see in any interface produced using @ghc -O@.
-An example of some @IdInfo@
-would be: that @Id@'s unfolding; or its arity.
-\end{enumerate}
-
-Then the fun begins with @IdDetails@...
-\begin{mytightcode}
-\codeallowbreaks{}data IdDetails
-
- {\dcd\rm---------------- Local values}
-
- = LocalId ShortName {\dcd\rm-- mentioned by the user}
-
- | SysLocalId ShortName {\dcd\rm-- made up by the compiler}
-
- {\dcd\rm---------------- Global values}
-
- | ImportedId FullName {\dcd\rm-- Id imported from an interface}
-
- | PreludeId FullName {\dcd\rm-- Prelude things the compiler ``knows'' about}
-
- | TopLevId FullName {\dcd\rm-- Top-level in the orig source pgm}
- {\dcd\rm-- (not moved there by transformations).}
-
- {\dcd\rm---------------- Data constructors}
-
- | DataConId FullName
- ConTag
- [TyVarTemplate] ThetaType [UniType] TyCon
- {\dcd\rm-- split-up type: the constructor's type is:}
- {\dcd\rm-- $\forall~tyvars . theta\_ty \Rightarrow$}
- {\dcd\rm-- $unitype_1 \rightarrow~ ... \rightarrow~ unitype_n \rightarrow~ tycon tyvars$}
-
- | TupleCon Int {\dcd\rm-- its arity}
-
- {\dcd\rm-- There are quite a few more flavours of {\tt IdDetails}...}\end{mytightcode}
-
-% A @ShortName@,\srcloc{basicTypes/NameTypes.lhs} which includes a name string
-% and a source-line location for the name's binding occurrence;
-
-In short: everything that later parts of the compiler might want to
-know about a local @Id@ is readily at hand. The same principle holds
-true for imported-variable and data-constructor @Ids@ (tuples are an
-important enough case to merit special pleading), as well as for other
-basic program entities. Here are a few further notes about the @Id@
-fragments above:
-\begin{itemize}
-\item
-A @FullName@\srcloc{basicTypes/NameTypes.lhs} is one that may be
-globally visible, with a module-name as well; it may have been
-renamed.
-\item
-@DataConKey@\srcloc{prelude/PrelUniqs.lhs} is a specialised
-fast-comparison key for data constructors; there are several of these
-kinds of things.
-\item
-The @UniType@ for @DataConIds@ is given in {\em two} ways: once, just as
-a plain type; secondly, split up into its component parts. This is to
-save frequently re-splitting such types.
-\item
-Similarly, a @TupleCon@ has a type attached, even though we could
-construct it just from the arity.
-\end{itemize}
-
-\subsubsection{@UniTypes@, representing types in the compiler}
-\label{sec:UniType}
-
-Let us look further at @UniTypes@\srcloc{uniType/}. Their definition
-is:
-\begin{mytightcode}
-\codeallowbreaks{}data UniType
- = UniTyVar TyVar
-
- | UniFun UniType {\dcd\rm-- function type}
- UniType
-
- | UniData TyCon {\dcd\rm-- non-synonym datatype}
- [UniType]
-
- | UniSyn TyCon {\dcd\rm-- type synonym}
- [UniType] {\dcd\rm--\ \ unexpanded form}
- UniType {\dcd\rm--\ \ expanded form}
-
- | UniDict Class {\dcd\rm-- for types with overloading}
- UniType
-
- {\dcd\rm-- The next two are to do with universal quantification.}
- | UniTyVarTemplate TyVarTemplate
-
- | UniForall TyVarTemplate
- UniType\end{mytightcode}
-When the typechecker processes a source module, it adds @UniType@
-information to all the basic entities (e.g., @Ids@), among other
-places (see Section~\ref{sec:second-order} for more details). These
-types are used throughout the compiler.
-
-The following example shows several things about @UniTypes@.
-If a programmer wrote @(Eq a) => a -> [a]@, it would be represented
-as:\footnote{The internal structures of @Ids@,
-@Classes@, @TyVars@, and @TyCons@ are glossed over here...}
-\begin{mytightcode}
-\codeallowbreaks{}UniForall {\dcd$\alpha$}
- (UniFun (UniDict {\dcd\em Eq} (UniTyVar {\dcd$\alpha$}))
- (UniFun (UniTyVarTemplate {\dcd$\alpha$})
- (UniData {\dcd\em listTyCon}
- [UniTyVarTemplate {\dcd$\alpha$}])))\end{mytightcode}
-From this example we see:
-\begin{itemize}
-\item
-The universal quantification of the type variable $\alpha$ is made explicit
-(with a @UniForall@).
-\item
-The class assertion @(Eq a)@ is represented with a @UniDict@ whose
-second component is a @UniType@---this
-reflects the fact that we expect @UniType@ to be used in a stylized
-way, avoiding nonsensical constructs (e.g.,
-@(UniDict f (UniDict g (UniDict h ...)))@).
-\item
-The ``double arrow'' (@=>@) of the Haskell source, indicating an
-overloaded type, is represented by the usual
-@UniFun@ ``single arrow'' (@->@), again in a stylized way.
-This change reflects the fact that each class assertion in a
-function's type is implemented by adding an extra dictionary argument.
-\item
-In keeping with the memoising tradition we saw with @Ids@, type
-synonyms (@UniSyns@) keep both the unexpanded and expanded forms handy.
-\end{itemize}
-
-\subsubsection{@IdInfo@: extra pragmatic info about an @Id@}
-\label{sec:IdInfo}
-
-[New in 0.16.] All the nice-to-have-but-not-essential information
-about @Ids@ is now hidden in the
-@IdInfo@\srcloc{basicTypes/IdInfo.lhs} datatype. It looks something
-like:
-\begin{mytightcode}
-\codeallowbreaks{}data IdInfo
- = NoIdInfo {\dcd\rm-- OK, we know nothing...}
-
- | MkIdInfo
- ArityInfo {\dcd\rm-- its arity}
- DemandInfo {\dcd\rm-- whether or not it is definitely demanded}
- InliningInfo {\dcd\rm-- whether or not we should inline it}
- SpecialisationInfo {\dcd\rm-- specialisations of this overloaded}
- {\dcd\rm-- function which exist}
- StrictnessInfo {\dcd\rm-- strictness properties, notably}
- {\dcd\rm-- how to conjure up ``worker'' functions}
- WrapperInfo {\dcd\rm-- ({\em informational only}) if an Id is}
- {\dcd\rm-- a ``worker,'' this says what Id it's}
- {\dcd\rm-- a worker for, i.e., ``who is my wrapper''}
- {\dcd\rm-- (used to match up workers/wrappers)}
- UnfoldingInfo {\dcd\rm-- its unfolding}
- UpdateInfo {\dcd\rm-- which args should be updated}
- SrcLocation {\dcd\rm-- source location of definition}\end{mytightcode}
-As you can see, we may accumulate a lot of information about an Id!
-(The types for all the sub-bits are given in the same place...)
-
-\subsubsection{Introducing dictionaries for overloading}
-
-The major novel feature of the Haskell language is its systematic
-overloading using {\em type classes}; Wadler and Blott's paper is the
-standard reference \cite{wadler89a}.
-
-To implement type classes, the typechecker not only checks the Haskell
-source program, it also {\em translates} it---by inserting code to
-pass {\em dictionaries} around. These dictionaries
-are essentially tuples of functions, from which the correct code may
-be plucked at run-time to give the desired effect. Kevin Hammond
-wrote and described the first working implementation of type
-classes \cite{hammond89a}, and the ever-growing static-semantics paper
-by Peyton Jones and Wadler is replete with the glories of dictionary
-handling \cite{peyton-jones90a}. (By the way, the typechecker's
-structure closely follows the static semantics paper; inquirers into
-the former will become devoted students of the latter.)
-
-Much of the abstract syntax is given
-over to output-only translation machinery. Here are a few more
-fragments of the @Expr@ type, all of which appear only in typechecker
-output:
-\begin{mytightcode}
-data Expr var pat =
- ...
- | DictLam [DictVar] (Expr var pat)
- | DictApp (Expr var pat) [DictVar]
- | Dictionary [DictVar] [Id]
- | SingleDict DictVar
- ...\end{mytightcode}
-You needn't worry about this stuff:
-After the desugarer gets through with such constructs, there's nothing
-left but @Ids@, tuples, tupling functions, etc.---that is, ``plain
-simple stuff'' that should make the potential optimiser's heart throb.
-Optimisation passes don't deal with dictionaries explicitly but, in
-some cases, quite a bit of the code passed through to them will be for
-dictionary-handling.
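-
-As a rough illustration of what that dictionary-handling code looks
-like (this is schematic, not the typechecker's literal output), an
-overloaded source function such as
-\begin{verbatim}
-member :: (Eq a) => a -> [a] -> Bool
-member x ys = any (== x) ys
-\end{verbatim}
-is translated so that the @Eq@ dictionary becomes an ordinary extra
-argument, from which the equality method is selected:
-\begin{verbatim}
-member eqDict x ys = any (\ y -> select_eq eqDict y x) ys
-\end{verbatim}
-Here @select_eq@ stands for the selector that plucks the @(==)@
-method out of the dictionary tuple; the real names are generated by
-the compiler.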
diff --git a/ghc/docs/add_to_compiler/howto-add.verb b/ghc/docs/add_to_compiler/howto-add.verb
deleted file mode 100644
index ab52723f6e..0000000000
--- a/ghc/docs/add_to_compiler/howto-add.verb
+++ /dev/null
@@ -1,357 +0,0 @@
-%************************************************************************
-%* *
-\section{How to add an optimisation pass}
-%* *
-%************************************************************************
-\subsection{Summary of the steps required}
-
-Well, with all the preliminaries out of the way, here is all that it
-takes to add your optimisation pass to the new glorious Glasgow
-Haskell compiler:
-\begin{enumerate}
-\item
-Select the input and output types for your pass; these will very
-likely be particular parameterisations of the Core or annotated Core
-data types. There is a small chance you will prefer to work at the
-STG-syntax level. (If these data types are inadequate to this
-purpose, {\em please} let us know!)
-
-\item
-Depending on exactly how you want your pass to work, set up some
-monad-ery, so as to avoid lots of horrible needless plumbing. The
-whole compiler is written in a monadic style, and there are plenty of
-examples from which you may steal. Section~\ref{sec:monadic-style}
-gives further details about how you might do this.
-
-\item
-Write your optimisation pass, and...
-
-{\em Do} use the existing types in the compiler, e.g., @UniType@,
-and the ``approved'' interfaces to them.
-
-{\em Don't} rewrite lots of utility code! Scattered around in various
-sometimes-obvious places, there is considerable code already written
-for the mangling and massaging of expressions, types, variables, etc.
-
-Section~\ref{sec:reuse-code} says more about how to re-use existing
-compiler bits.
-
-\item
-Follow our naming conventions \smiley{} Seriously, it may lead to greater
-acceptance of our code and yours if readers find a system written with
-at least a veneer of uniformity.
-\ToDo{mention Style Guide, if it ever really exists.}
-
-\item
-To hook your pass into the compiler, either add something directly to
-the @Main@ module of the compiler\srcloc{main/Main.lhs}, or into the
-Core-to-Core simplification driver\srcloc{simplCore/SimplCore.lhs}, or
-into the STG-to-STG driver\srcloc{simplStg/SimplStg.lhs}.
-
-Also add something to the compilation-system
-driver\srcloc{ghc/driver/ghc.lprl}
-(called @ghc@ here) so that appropriate user-provided command-line
-options will be transmogrified into the correct options fed to the
-@Main@ module.
-
-\item
-Add some appropriate documentation to the user's guide,
-@ghc/docs/users_guide@.
-
-\item
-Run your optimisation on {\em real programs}, measure, and improve.
-(Separate from this compiler's distribution, we provide a ``NoFib''
-suite of ``real Haskell programs'' \cite{partain92a}. We strongly
-encourage their use, so you can more readily compare your work with
-others'.)
-
-\item
-Send us your contribution so we can add it to the distribution! We
-will be happy to include anything semi-reasonable.
-This will practically ensure fame, if
-not fortune, and---with a little luck---considerable notoriety.
-\end{enumerate}
-
-%************************************************************************
-%* *
-\subsection{Using monadic code}\label{sec:monadic-style}
-%* *
-%************************************************************************
-
-{\em Monads} are one way of structuring functional programs. Phil
-Wadler is their champion, and his recent papers on the subject are a
-good place to start your investigations. ``The essence of functional
-programming'' even has a section about the use of monads in this
-compiler \cite{wadler92a}! An earlier paper describes ``monad
-comprehensions'' \cite{wadler90a}. For a smaller self-contained
-example, see his ``literate typechecker'' \cite{wadler90b}.
-
-We use monads extensively in this compiler, mainly as a way to plumb
-state around. The simplest example is a monad to plumb a
-@UniqueSupply@\srcloc{basicTypes/Unique.lhs} (i.e., name supply)
-through a function.
-
-\ToDo{Actually describe one monad thing completely.}
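-
-Until that description materialises, here is a deliberately
-simplified sketch of the idea (a toy, {\em not} the compiler's own
-@UniqueSupply@ machinery---the real operations live in
-@basicTypes/Unique.lhs@):
-\begin{verbatim}
--- Toy name-supply monad: thread an Int counter through a computation.
-type UniqSM a = Int -> (a, Int)
-
-returnUs :: a -> UniqSM a
-returnUs x supply = (x, supply)
-
-thenUs :: UniqSM a -> (a -> UniqSM b) -> UniqSM b
-thenUs m k supply = case m supply of (x, supply') -> k x supply'
-
-getUniqueUs :: UniqSM Int
-getUniqueUs supply = (supply, supply + 1)
-
--- Example: grab two fresh "uniques" without passing the counter by hand.
-freshPair :: UniqSM (Int, Int)
-freshPair = getUniqueUs `thenUs` \ u1 ->
-            getUniqueUs `thenUs` \ u2 ->
-            returnUs (u1, u2)
-\end{verbatim}
-The point is simply that the plumbing of the supply is hidden inside
-@thenUs@, so passes that need fresh names can be written without an
-extra argument snaking through every function.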
-
-We encourage you to use a monadic style, where appropriate, in
-the code you add to the compiler. To this end, here is a list of
-monads already in use in the compiler:
-\begin{description}
-\item[@UniqueSupply@ monad:]\srcloc{basicTypes/Unique.lhs}%
-To carry a name supply around; do a @getUnique@ when you
-need one. Used in several parts of the compiler.
-
-\item[Typechecker monad:]\srcloc{typecheck/TcMonad.lhs}%
-Quite a complicated monad; carries around a substitution, some
-source-location information, and a @UniqueSupply@; also plumbs
-typechecker success/failure back up to the right place.
-
-\item[Desugarer monad:]\srcloc{deSugar/DsMonad.lhs}%
-Carries around a @UniqueSupply@ and source-location information (to
-put in pattern-matching-failure error messages).
-
-\item[Code-generator monad:]\srcloc{codeGen/CgMonad.lhs}%
-Carries around an environment that maps variables to addressing modes
-(e.g., ``in this block, @f@ is at @Node@ offset 3''); also, carries
-around stack- and heap-usage information. Quite tricky plumbing, in
-part so that the ``Abstract~C'' output will be produced lazily.
-
-\item[Monad for underlying I/O machinery:]\srcloc{ghc/lib/io/GlaIOMonad.lhs}%
-This is the basis of our I/O implementation. See the paper about it
-\cite{peyton-jones92b}.
-\end{description}
-
-%************************************************************************
-%* *
-\subsection{Adding a new @PrimitiveOp@}\label{sec:add-primop}
-%* *
-%************************************************************************
-
-You may find yourself wanting to add a new
-@PrimitiveOp@\srcloc{prelude/PrimOps.lhs} to the compiler's
-repertoire: these are the lowest-level operations that cannot be
-expressed in Haskell---in our case, things written in C.
-
-What you would need to do to add a new op:
-\begin{itemize}
-\item
-Add it to the @PrimitiveOp@ datatype in @prelude/PrimOps.lhs@; it's
-just an enumeration type.
-\item
-Most importantly, add an entry in the @primOpInfo@ function for your
-new primitive.
-\item
-If you want your primitive to be visible to some other part of the
-compiler, export it via the @AbsPrel@\srcloc{prelude/AbsPrel.lhs}
-interface (and import it from there).
-\item
-If you want your primitive to be visible to the user (modulo some
-``show-me-nonstd-names'' compiler flag...), add your primitive to one
-or more of the appropriate lists in @buildinNameFuns@, in
-@prelude/AbsPrel.lhs@.
-\item
-If your primitive can be implemented with just a C macro, add it to
-@ghc/imports/StgMacros.lh@. If it needs a C function, put that in
-@ghc/runtime/prims/@, somewhere appropriate; you might need to put a
-declaration of some kind in a C header file in @ghc/imports/@.
-\item
-If these steps are not enough, please get in touch.
-\end{itemize}
-
-%************************************************************************
-%* *
-\section{How to add a new ``PrimOp'' (primitive operation)}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\section{How to add a new ``user pragma''}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\section{GHC utilities and re-usable code}\label{sec:reuse-code}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\subsection{Reuse existing utilities}
-%* *
-%************************************************************************
-
-Besides the utility functions provided in Haskell's standard prelude,
-we have several modules of generally-useful utilities in \mbox{\tt utils/}
-(no need to re-invent them!):
-\begin{description}
-\item[@Maybe@ and @MaybeErr@:]
-Two very widely used types (and some operations on them):
-\begin{verbatim}
-data Maybe a = Nothing | Just a
-data MaybeErr a b = Succeeded a | Failed b
-\end{verbatim}
-
-\item[@Set@:]
-A simple implementation of sets (an abstract type). The things you
-want to have @Sets@ of must be in class @Ord@.
-
-\item[@ListSetOps@:]
-A module providing operations on lists that have @Set@-sounding names;
-e.g., @unionLists@.
-
-\item[@Digraph@:]
-A few functions to do with directed graphs, notably finding
-strongly-connected components (and cycles).
-
-\item[@Util@:]
-General grab-bag of utility functions not provided by the standard
-prelude.
-\end{description}
-
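-By way of example (illustrative only---not code lifted from the
-compiler), the @MaybeErr@ type listed above is the natural result
-type for a lookup that wants to say {\em why} it failed:
-\begin{verbatim}
-lookupEnv :: Eq key => [(key, val)] -> key -> MaybeErr val String
-lookupEnv []              _   = Failed "not in scope"
-lookupEnv ((k, v) : rest) key
-  | key == k  = Succeeded v
-  | otherwise = lookupEnv rest key
-\end{verbatim}
-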
-Much of the compiler is structured around major datatypes, e.g.,
-@UniType@ or @Id@. These datatypes (often ``abstract''---you can't
-see their actual constructors) are packaged with many useful
-operations on them. So, again, look around a little for these
-functions before rolling your own. Section~\ref{sec:reuse-datatypes}
-goes into this matter in more detail.
-
-%************************************************************************
-%* *
-\subsection{Use pretty-printing and forcing machinery}
-%* *
-%************************************************************************
-
-All of the non-trivial datatypes in the compiler are in class
-@Outputable@, meaning you can pretty-print them (method: @ppr@) or
-force them (method: @frc@).
-
-Pretty-printing is by far the more common operation. @ppr@ takes a
-``style'' as its first argument; it can be one of @PprForUser@,
-@PprDebug@, or @PprShowAll@, which---in turn---are intended to show
-more and more detail. For example, @ppr PprForUser@ on a @UniType@
-should print a type that would be recognisable to a Haskell user;
-@ppr PprDebug@ prints a type in the way an implementer would normally
-want to see it (e.g., with all the ``for all...''s), and
-@ppr PprShowAll@ prints everything you could ever want to know about that
-type.
-
-@ppr@ produces a @Pretty@, which should eventually wend its way to
-@main@. @main@ can then peruse the program's command-line options to
-decide on a @PprStyle@ and column width in which to print. In
-particular, it's bad form to @ppShow@ the @Pretty@ into a @String@
-deep in the bowels of the compiler, where the user cannot control the
-printing.
-
-If you introduce non-trivial datatypes, please make them instances of
-class @Outputable@.
-
-%************************************************************************
-%* *
-\subsection{Use existing data types appropriately}\label{sec:reuse-datatypes}
-%* *
-%************************************************************************
-
-The compiler uses many datatypes. Believe it or not, these have
-carefully structured interfaces to the ``outside world''! Unfortunately,
-the current Haskell module system does not let us enforce proper
-access to these datatypes to the extent we would prefer. Here is a
-list of datatypes (and their operations) you should feel free to use,
-as well as how to access them.
-
-The first major group of datatypes are the ``syntax datatypes,'' the
-various ways in which the program text is represented as it makes its
-way through the compiler. These are notable in that you are allowed
-to see/make-use-of all of their constructors:
-\begin{description}
-\item[Prefix form:]\srcloc{reader/PrefixSyn.lhs}%
-You shouldn't need this.
-
-\item[Abstract Haskell syntax:]\srcloc{abstractSyn/AbsSyn.lhs}%
-Access via the @AbsSyn@ interface. An example of what you should {\em not}
-do is import the @AbsSynFuns@ (or @HsBinds@ or ...) interface
-directly. @AbsSyn@ tells you what you're supposed to see.
-
-\item[Core syntax:]\srcloc{coreSyn/*Core.lhs}%
-Core syntax is parameterised, and you should access it {\em via one of the
-parameterisations}. The most common is @PlainCore@; another is
-@TaggedCore@. Don't use @CoreSyn@, though.
-
-\item[STG syntax:]\srcloc{stgSyn/StgSyn.lhs}%
-Access via the @StgSyn@ interface.
-
-\item[Abstract~C syntax:]\srcloc{absCSyn/AbsCSyn.lhs}%
-Access via the @AbsCSyn@ interface.
-\end{description}
-
-The second major group of datatypes are the ``basic entity''
-datatypes; these are notable in that you don't need to know their
-representation to use them. Several have already been mentioned:
-\begin{description}
-\item[UniTypes:]\srcloc{uniType/AbsUniType.lhs}%
-This is a gigantic
-interface onto the world of @UniTypes@; accessible via the
-@AbsUniType@ interface. You should import operations on all the {\em
-pieces} of @UniTypes@ (@TyVars@, @TyVarTemplates@, @TyCons@,
-@Classes@, and @ClassOps@) from here as well---everything for the
-``type world.''
-
-{\em Please don't grab type-related functions from internal modules,
-behind @AbsUniType@'s back!} (Otherwise, we won't discover the
-shortcomings of the interface...)
-
-\item[Identifiers:]\srcloc{basicTypes/Id.lhs}%
-Interface: @Id@.
-
-\item[``Core'' literals:]\srcloc{basicTypes/CoreLit.lhs}%
-These are the unboxed literals used in Core syntax onwards. Interface: @CoreLit@.
-
-\item[Environments:]\srcloc{envs/GenericEnv.lhs}%
-A generic environment datatype, plus a generally useful set of
-operations, is provided via the @GenericEnv@ interface. We encourage
-you to use this, rather than roll your own; then your code will
-benefit when we speed up the generic code. All of the typechecker's
-environment stuff (of which there is plenty) is built on @GenericEnv@,
-so there are plenty of examples to follow.
-
-\item[@Uniques@:]\srcloc{basicTypes/Unique.lhs}%
-Essentially @Ints@; use them when you need something unique for fast
-comparisons. Interface:
-@Unique@. This interface also provides a simple @UniqueSupply@ monad;
-often just the thing...
-
-\item[Wired-in standard prelude knowledge:]\srcloc{prelude/}%
-The compiler has to know a lot about the standard prelude. What it knows
-is in the @compiler/prelude@ directory; all the rest of the compiler
-gets its prelude knowledge through the @AbsPrel@ interface.
-
-The prelude stuff can get hairy. There is a separate document about
-it. Check the @ghc/docs/README@ list for a pointer to it...
-\end{description}
-
-The above list isn't exhaustive. By all means, ask if you think
-``Surely a function like {\em this} is in here somewhere...''
-
-
-%************************************************************************
-%* *
-\section{Cross-module pragmatic info: the mysteries revealed}
-%* *
-%************************************************************************
-
-ToDo: mention wired-in info.
-
-%************************************************************************
-%* *
-\section{GHC hacking tips and ``good practice''}
-%* *
-%************************************************************************
-
-ASSERT
-
-%************************************************************************
-%* *
-\section{Glasgow pragmatics: build trees, etc.}
-%* *
-%************************************************************************
diff --git a/ghc/docs/add_to_compiler/overview-fig.fig b/ghc/docs/add_to_compiler/overview-fig.fig
deleted file mode 100644
index a68a0daa69..0000000000
--- a/ghc/docs/add_to_compiler/overview-fig.fig
+++ /dev/null
@@ -1,136 +0,0 @@
-#FIG 2.1
-80 2
-6 264 49 379 119
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 312 69 376 69 376 101 312 101 312 69 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 272 93 328 117 344 117 344 101 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 344 69 344 53 328 53 268 73 9999 9999
--6
-6 269 149 384 219
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 317 169 381 169 381 201 317 201 317 169 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 277 193 333 217 349 217 349 201 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 349 169 349 153 333 153 273 173 9999 9999
--6
-1 1 0 1 -1 0 0 0 0.000 1 0.000 82 324 49 17 82 324 129 340
-1 1 0 1 -1 0 0 0 0.000 1 0.000 80 36 49 17 80 36 128 52
-1 1 0 1 -1 0 0 0 0.000 1 0.000 82 228 49 17 82 228 129 244
-1 1 0 1 -1 0 0 0 0.000 1 0.000 82 419 49 17 82 419 129 435
-1 1 0 1 -1 0 0 0 0.000 1 0.000 79 133 49 17 79 133 127 149
-1 1 0 1 -1 0 0 0 0.000 1 0.000 235 180 49 17 235 180 283 196
-1 1 0 1 -1 0 0 0 0.000 1 0.000 232 372 49 17 232 372 280 388
-1 1 0 1 -1 0 0 0 0.000 1 0.000 233 276 49 17 233 276 281 292
-1 1 0 1 -1 0 0 0 0.000 1 0.000 232 85 49 17 232 85 280 101
-1 1 0 1 -1 0 0 0 0.000 1 0.000 233 467 49 17 233 467 281 483
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 292 81 308 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 244 81 260 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 33 260 129 260 129 292 33 292 33 260 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 33 164 129 164 129 196 33 196 33 164 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 101 81 117 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 53 81 69 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 148 81 164 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 196 81 212 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 33 69 129 69 129 101 33 101 33 69 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 33 356 129 356 129 388 33 388 33 356 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 33 451 129 451 129 483 33 483 33 451 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 388 81 403 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 435 81 451 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 340 81 356 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 81 483 81 499 161 499 161 49 212 49 212 69 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 185 308 280 308 280 340 185 340 185 308 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 37 232 69 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 101 232 117 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 185 117 280 117 280 148 185 148 185 117 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 185 403 280 403 280 435 185 435 185 403 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 196 232 212 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 244 232 260 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 292 232 308 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 340 232 356 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 388 232 403 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 435 232 451 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 232 148 232 164 9999 9999
-2 1 0 1 -1 0 0 0 0.000 0 1 0
- 0 0 1.000 4.000 8.000
- 272 284 312 308 9999 9999
-2 2 0 1 -1 0 0 0 0.000 0 0 0
- 189 212 284 212 284 244 189 244 189 212 9999 9999
-2 4 0 3 -1 0 0 0 0.000 7 0 0
- 13 13 13 515 400 515 400 13 13 13 9999 9999
-4 0 0 10 0 -1 0 0.000 4 10 45 61 328 AbsSyntax
-4 0 0 10 0 -1 0 0.000 4 7 60 53 41 Haskell source
-4 0 0 10 0 -1 0 0.000 4 10 45 61 232 AbsSyntax
-4 0 0 10 0 -1 0 0.000 4 10 50 57 376 Typechecker
-4 0 0 10 0 -1 0 0.000 4 10 45 61 423 AbsSyntax
-4 0 0 10 0 -1 0 0.000 4 10 42 57 471 Desugarer
-4 0 0 10 0 -1 0 0.000 4 7 43 61 137 Prefix form
-4 0 0 10 0 -1 0 0.000 4 7 29 69 184 Reader
-4 0 0 10 0 -1 0 0.000 4 7 36 65 280 Renamer
-4 0 0 10 0 -1 0 0.000 4 7 38 216 232 CodeGen
-4 0 0 10 0 -1 0 0.000 4 8 43 308 328 generators
-4 0 0 10 0 -1 0 0.000 4 7 44 308 320 Other code
-4 0 0 10 0 -1 0 0.000 4 10 43 212 137 CoreToStg
-4 0 0 10 0 -1 0 0.000 4 10 47 212 89 CoreSyntax
-4 0 0 10 0 -1 0 0.000 4 10 41 212 184 StgSyntax
-4 0 0 10 0 -1 0 0.000 4 7 44 208 280 Abstract C
-4 0 0 10 0 -1 0 0.000 4 7 30 216 328 Flatten
-4 0 0 10 0 -1 0 0.000 4 7 6 228 376 C
-4 0 0 10 0 -1 0 0.000 4 10 42 212 423 C compiler
-4 0 0 10 0 -1 0 0.000 4 7 48 212 471 Native code
-4 0 0 10 0 -1 0 0.000 4 10 32 328 89 Simplify
-4 0 0 10 0 -1 0 0.000 4 7 65 201 33 Other front ends
-4 0 0 10 0 -1 0 0.000 4 10 65 42 89 Lex/Yacc parser
-4 0 0 10 0 -1 0 0.000 4 10 32 333 189 Simplify
diff --git a/ghc/docs/add_to_compiler/overview.verb b/ghc/docs/add_to_compiler/overview.verb
deleted file mode 100644
index 73f51c46a1..0000000000
--- a/ghc/docs/add_to_compiler/overview.verb
+++ /dev/null
@@ -1,70 +0,0 @@
-%************************************************************************
-%* *
-\section{Overview of the Glasgow Haskell compiler}
-%* *
-%************************************************************************
-
-Figure~\ref{fig:overview} shows a schematic overview of the Glasgow
-Haskell compiler (GHC), including all the major datatypes and most
-existing passes.
-\begin{figure}
-\centering
-\input{overview-fig}
-%\psfig{figure=closure.ps}
-\caption{Compiler overview}
-\label{fig:overview}
-\end{figure}
-The compiler is itself written in Haskell. As of now, the compiler is
-made up of about 200?~modules, with roughly 40,000?~lines of
-Haskell code, excluding comments and blank lines.
-
-The compiler divides unsurprisingly into a {\em front end} and a {\em
-back end}, corresponding to the left and right columns of
-Figure~\ref{fig:overview}, respectively.
-
-The front end, discussed further in Section~\ref{sec:front-end}, is
-the part that may report errors back to the user. The two main pieces
-are a {\em renamer}\srcloc{renamer/}, which handles naming issues,
-including support of the Haskell module system, and the {\em
-typechecker}\srcloc{typecheck/}.
-
-The front end operates on a collection of data types that we call
-``abstract syntax\srcloc{abstractSyn/}.'' These types
-match the Haskell language, construct for construct. For example,
-if you write @... [ x | x <- [1..n] ] ...@, the typechecker
-will actually see something like:
-\begin{verbatim}
-ListComp
- (Var x)
- (GeneratorQual (VarPatIn x)
- (ArithSeq (FromTo (Lit (IntLit 1)) (Var n))))
-\end{verbatim}
-So, the renamer and typechecker work on unrestructured Haskell source
-rather than its desugared equivalent. The compiler should be {\em
-quicker} to find errors (because the source is much smaller and time
-hasn't been taken desugaring), and it should report errors more
-lucidly, in terms of the original program text.
-
-A conventional desugaring pass\srcloc{deSugar/} (basically Wadler's
-Chapter~5 of Peyton Jones's 1987 implementation book
-\cite{peyton-jones87b}) converts the typechecker's abstract-syntax output
-(with types attached) into the ``CoreSyntax\srcloc{coreSyn/}'' data
-type. This data type is little more than the second-order polymorphic
-lambda calculus and is intended to be the {\em lingua franca} of the
-compiler's back end, including almost all of the optimisation passes.
-Core syntax is explained at length in Section~\ref{sec:core-syntax}.
-
-The back end of the compiler, discussed further in
-Section~\ref{sec:back-end}, takes a successfully-typechecked module
-and produces executable code for it. The back end consists of zero or
-more Core-to-Core transformation passes, followed by conversion to STG
-syntax\srcloc{stgSyn/} (a very low-level functional language, named
-after the intended Spineless Tagless G-machine\footnote{Oops! Make
-that ``shared term graph'' language! (Who's fooling who here,
-Simon?)} target architecture), then some STG-to-STG transformations,
-and finally out of the functional world\srcloc{codeGen/} into
-``Abstract~C\srcloc{absCSyn/},'' a datatype intended as an adequate
-launching pad into both portable C and into get-your-hands-{\em
-really}-dirty native-code generation for a particular instruction-set
-architecture. We can generate C, or native-code for SPARCs and DEC
-Alphas.
diff --git a/ghc/docs/add_to_compiler/paper.bbl b/ghc/docs/add_to_compiler/paper.bbl
deleted file mode 100644
index 7f2437a524..0000000000
--- a/ghc/docs/add_to_compiler/paper.bbl
+++ /dev/null
@@ -1,72 +0,0 @@
-\begin{thebibliography}{10}
-
-\bibitem{hudak91a}
-Report on the programming language {Haskell}, a non-strict purely functional
- language ({Version} 1.1), August, 1991.
-\newblock Computing Science Department, Glasgow University, forthcoming.
-
-\bibitem{hammond89a}
-Kevin Hammond.
-\newblock Implementing type classes for {Haskell}.
-\newblock In {\em Proceedings of the Glasgow Workshop on Functional
- Programming}, page ????, Fraserburgh, Scotland, August, 1989.
-
-\bibitem{partain92a}
-Will Partain.
-\newblock The {\tt nofib} benchmark suite of {Haskell} programs, 1992.
-
-\bibitem{peyton-jones87b}
-Simon~L. {Peyton Jones}.
-\newblock {\em The Implementation of Functional Programming Languages}.
-\newblock Prentice-Hall, 1987.
-
-\bibitem{peyton-jones92a}
-Simon~L. {Peyton Jones}.
-\newblock Implementing lazy functional languages on stock hardware: the
- {Spineless Tagless G-machine}.
-\newblock {\em Journal of Functional Programming}, 1992.
-\newblock To appear.
-
-\bibitem{peyton-jones91b}
-Simon~L. {Peyton Jones} and John Launchbury.
-\newblock Unboxed values as first class citizens in a non-strict functional
- language.
-\newblock In John Hughes, editor, {\em Functional Programming Languages and
- Computer Architecture (FPCA)}, volume 523 of {\em Lecture Notes in Computer
- Science}, pages 636--666, Cambridge, MA, August 26--30, 1991.
- Springer-Verlag.
-
-\bibitem{peyton-jones90a}
-Simon~L. {Peyton Jones} and Philip Wadler.
-\newblock A static semantics for {Haskell}, 1990.
-\newblock Dept.~of Computing Science, University of Glasgow.
-
-\bibitem{peyton-jones92b}
-Simon~L. {Peyton Jones} and Philip Wadler.
-\newblock Imperative functional programming (extended abstract), 1992.
-\newblock To be in POPL~'93.
-
-\bibitem{wadler90a}
-Philip Wadler.
-\newblock Comprehending monads.
-\newblock In {\em Proceedings of the 1990 ACM Conference on {LISP} and
- Functional Programming}, pages 61--78, Nice, France, June 27--29, 1990.
-
-\bibitem{wadler90b}
-Philip Wadler.
-\newblock A simple type inference algorithm, 1990.
-\newblock Dept.~of Computing Science, University of Glasgow.
-
-\bibitem{wadler92a}
-Philip Wadler.
-\newblock The essence of functional programming.
-\newblock In {\em 19th ACM Symposium on Principles of Programming Languages
- (POPL)}, page ?????, Santa Fe, NM, January ????, 1992.
-
-\bibitem{wadler89a}
-Philip~L. Wadler and Stephen Blott.
-\newblock How to make {\em ad-hoc\/} polymorphism less {\em ad hoc\/}.
-\newblock In {\em 16th ACM Symposium on Principles of Programming Languages
- (POPL)}, pages 60--76, Austin, TX, January 11--13, 1989.
-
-\end{thebibliography}
diff --git a/ghc/docs/add_to_compiler/paper.verb b/ghc/docs/add_to_compiler/paper.verb
deleted file mode 100644
index e12542611f..0000000000
--- a/ghc/docs/add_to_compiler/paper.verb
+++ /dev/null
@@ -1,128 +0,0 @@
-\documentstyle[11pt,../grasp,code]{article}
-%\documentstyle[12pt,springer-wcs,oldfontnames,code]{article}
-\setlength{\marginparwidth}{1.5cm}
-\setlength{\parskip}{0.25cm}
-\setlength{\parindent}{0cm}
-\renewcommand{\textfraction}{0.2}
-\renewcommand{\floatpagefraction}{0.7}
-%
-\newcommand{\freevars}[1]{fvs(#1)}
-%
-% to avoid src-location marginpars, comment in/out the out/in defns.
-%\newcommand{\srcloc}[1]{}
-%\newcommand{\onlyIfSrcLocs}[1]{}
-%
-\newcommand{\onlyIfSrcLocs}[1]{#1}
-%
-% Aran Lunzer told me to do this magic:
-\def\mytightcode{\codeaux{\leftmargin=0pt}}%
-\let\endmytightcode\endcodeaux
-% what he told me:
-%% CODE environment
-%% ----------------
-%% To get a single line of spacing above and below a code segment, with
-%% zero added indention (like a verbatim environment), and consistent appearance
-%% whether or not you use \codeallowbreaks:
-%%
-%% \def\code{\codeaux{\leftmargin=0pt}}
-%%
-%% Then for a normal, unbreakable section:
-%%
-%% \begin{code}
-%% first line of code
-%% ...
-%% last line of code\end{code}
-%%
-%% And for a breakable section:
-%%
-%% \begin{code}
-%% \codeallowbreaks{}first line of code
-%% ...
-%% last line of code\end{code}
-%%
-%%
-%% srcloc marginpars
-%% -----------------
-%%
-%% To ensure that marginpars appear on the same line as their associated text,
-%% especially in a description list, add a \mbox{} to their definition:
-%%
-%% \renewcommand{\srcloc}[1]{\mbox{}\marginpar{\footnotesize\tt #1}}
-%%
-%% This empty mbox doesn't introduce anything visible, but can screw up your
-%% spacing unless you are careful. So...
-%%
-%% Usage in a description list:
-%%
-%% \item[item description:]\srcloc{no spaces around!}%
-%% Here is the item text.
-%%
-%% In the middle of a sentence:
-%%
-%% And now for something\srcloc{completely} different.
-%%
-%% Near a period or colon (MUST come before the punctuation):
-%%
-%% Hello, good evening, and welcome\srcloc{foo}. Here is the fnord.
-%
-\begin{document}
-\title{How to Add an Optimisation Pass\\
-to the Glasgow Haskell compiler\\
-(two months before version~0.23)}
-\author{Will Partain, acting as AQUA Project scribe\\
-e-mail contact: partain@@dcs.glasgow.ac.uk}
-\renewcommand{\today}{October, 1994}
-\maketitle
-% temporarily....
-\tableofcontents
-%\clearpage
-\begin{abstract}
-A major purpose of the new Glasgow Haskell compiler (written in
-Haskell) is to be freely available in source form so that others can
-use it as ``root stock'' onto which they may graft their own wonderful
-bits. This document is a field guide for the aspiring
-better-compiler grower, particularly one who wishes to add an
-optimisation pass.
-\end{abstract}
-
-\onlyIfSrcLocs{Throughout this paper, pointers to the relevant
-source-code are given in the margins. This code is in the {\tt
-ghc/compiler/} part of the distribution; names ending in {\tt /} are
-directories. We assume you already know Haskell.}
-
-% \input{state-of-play}
-
-\input{overview}
-
-\input{front-end}
-\input{back-end}
-
-\input{core-syntax}
-
-\input{howto-add}
-
-%************************************************************************
-%* *
-\section{For further information}
-%* *
-%************************************************************************
-
-Besides the documents listed in the References below, there are
-several internal compiler documents that come with the GHC
-distribution\srcloc{ghc/docs/README}.
-
-If you are hacking GHC, you should be on the @glasgow-haskell-users@
-mailing list. Send mail to
-@glasgow-haskell-users-request@@dcs.glasgow.ac.uk@ to subscribe.
-You may wish to subscribe to our ``bugs channel'' (
-@glasgow-haskell-bugs-request@@dcs.glasgow.ac.uk@) as well, if you
-are a glutton for punishment.
-
-Further suggestions as to how we can make your job easier will be most
-appreciated.
-
-\bibliographystyle{wpplain} % wpplain, wplong, wpannote, ...
-\bibliography{wp_abbrevs,comp}
-
-%\printindex
-\end{document}
diff --git a/ghc/docs/add_to_compiler/slides-root.tex b/ghc/docs/add_to_compiler/slides-root.tex
deleted file mode 100644
index 163cc3d856..0000000000
--- a/ghc/docs/add_to_compiler/slides-root.tex
+++ /dev/null
@@ -1,8 +0,0 @@
-\documentstyle{slides}
-\pagestyle{empty}
-%\onlyslides{1-99}
-%\onlynotes{1-99}
-\begin{document}
-\blackandwhite{slides}
-%\input{slides}
-\end{document}
diff --git a/ghc/docs/add_to_compiler/slides.tex b/ghc/docs/add_to_compiler/slides.tex
deleted file mode 100644
index 947adcb12a..0000000000
--- a/ghc/docs/add_to_compiler/slides.tex
+++ /dev/null
@@ -1,86 +0,0 @@
-%01 title
-\begin{slide}{}
-\begin{center}
-{\Large
-How To Add\\
-An Optimisation Pass To\\
-The Glasgow Haskell Compiler\\[40pt]
-}
-{\large
-Will Partain\\
-(GRASP Project scribe)
-}
-\end{center}
-\end{slide}
-
-%02 hello, world
-\begin{slide}{}
-{\Large The state of play}
-
-\begin{verbatim}
-sun3% time gcc -c hello.c
-0.240u 0.520s 0:01.00 76.0% 0+51k 0+9io 0pf+0w
-
-sun3% time nlmlc -c hello.m
-3.320u 1.740s 0:05.65 89.5% 0+240k 1+21io 1pf+0w
-
-sun3% time nhc -c hello.hs
-26.680u 2.860s 0:32.00 92.3% 0+950k 2+31io 18pf+0w
-
-sun3% time do100x # C
-6.980u 7.880s 0:14.93 99.5% 0+50k 0+0io 0pf+0w
-
-sun3% time do100x # LML
-7.880u 10.500s 0:18.50 99.3% 0+57k 1+0io 1pf+0w
-
-sun3% time do100x # haskell
-7.760u 10.440s 0:18.48 98.4% 0+56k 1+0io 1pf+0w
-\end{verbatim}
-\end{slide}
-%% % time hello100 > /dev/null
-%% 0.060u 0.100s 0:00.16 100.0% 0+51k 0+0io 0pf+0w
-
-%03 analyses
-\begin{slide}{}
-{\Large Analyses (FPCA~'89, PLDI~'91)}
-
-binding-time analysis\\
-closure analysis\\
-complexity analysis\\
-demand analysis\\
-facet analysis\\
-interference analysis\\
-lifetime analysis\\
-liveness analysis\\
-path analysis\\
-polymorphic-instance analysis\\
-stacklessness analysis\\
-strictness analysis\\
-time analysis\\
-update analysis
-\end{slide}
-
-\begin{note}
-Contrast with conventional-compiler concerns:
-
-use of runtime feedback\\
-matching w/ low-level hardware concerns\\
-work very hard for their extra information
-\end{note}
-
-\begin{slide}{}
-{\Large Optimisations in use: LML}
-
-\begin{itemize}
-\item
-constant folding, arithmetic simplification
-\item
-many local transformations (case of case...)
-\item
-inlining, $\beta$-reduction
-\item
-strictness analysis
-\item
-G-code and m-code optimisation
-\end{itemize}
-\end{slide}
diff --git a/ghc/docs/add_to_compiler/state-of-play.NOTES b/ghc/docs/add_to_compiler/state-of-play.NOTES
deleted file mode 100644
index cdfa7d83f0..0000000000
--- a/ghc/docs/add_to_compiler/state-of-play.NOTES
+++ /dev/null
@@ -1,73 +0,0 @@
-analyses:
- strictness & binding-time analysis (\cite{launchbury91a})
- polymorphic-instance analysis (pldi 91; referred \cite{launchbury91a}, p 86 top left)
- facet analysis (part of [higher-order offline] parameterized partial evaluation)
- (pldi 91: \cite{consel91a})
- binding-time analysis (fpca89; \cite{mogensen91})
- strictness analysis (\cite{wadler87a})
- update analysis (fpca; \cite{bloss89b})
- path analysis (fpca; \cite{bloss89b})
- interference, closure, and lifetime analysis (fpca; \cite{sestoft89a})
-	stacklessness analysis (fpca; \cite{lester89b})
- liveness analysis (AHU, reffed by lester89b)
- complexity analysis (fpca, \cite{rosendahl89a})
- demand analysis
- time analysis
-
-type systems:
- refinement types (pldi 91; \cite{freeman91a})
- soft typing (pldi 91; \cite{cartwright91a})
-
-other:
-
-done in LML compiler:
- llift lambda lifter
- /Bconv
- simpl
- /asimpl arithmetic simplifications
- /casetr case of case ... (& a couple of others ?)
- /mlet mlet (inlining) ?
- /simpl constant folding, casefold, Esimpl, simpl,
- force arity, movelam
- strict very simple strictness analysis
- transform
- /case caseelim
- /casep condjoin
- /constr constrtr
- /lettrans let transformations
- unrec
- Gopt G-code optimiser
- mopt m-code optimiser
-
-done in yale compiler:
- (in flic)
- optimization : \beta-redn (constant propagation & inlining)
- constant folding
- dead code elim
- strictness analysis
-
-the competition:
-
- (mips compiler)
- compiles to "ucode" (symbolic assembler)
- optimisations on both ucode and binary assembler
- -O2 global ucode optimizer
- -O3 global register alloc
- -feedback file
- -cord procedure re-arranger ; reduce cache conflicts
- pixie adds things to binary for profiling
- pixstats generate exec stats from a pixified pgm
- prof analyse profile data (pc-sampling, basic-blk counting)
-
- data dependence analysis (pldi 91; \cite{maydan91a})
- (nice table of stats-- pldi 91; \cite{goff91a}, p 25)
-
- tiling for better cache hits (pldi 91: \cite{wolf91a})
-
- using real or estimated runtime profiles (pldi 91: \cite{wall91a})
-
- procedure merging w/ instruction caches (pldi 91: \cite{mcfarling91a})
-
- fortran @ 10 Gflops (pldi 91: \cite{bromley91a})
-
- global instr scheduling for superscalar machines (pldi 91: \cite{bernstein91a})
diff --git a/ghc/docs/add_to_compiler/state-of-play.verb b/ghc/docs/add_to_compiler/state-of-play.verb
deleted file mode 100644
index 301b2524db..0000000000
--- a/ghc/docs/add_to_compiler/state-of-play.verb
+++ /dev/null
@@ -1,14 +0,0 @@
-%************************************************************************
-%* *
-\section{The state of play}
-%* *
-%************************************************************************
-
-\ToDo{This section will describe the state of play: where
-functional-language compilers are; compared to their imperative
-cousins.}
-
-%The burden of proof remains with us functional programmers. We
-%encourage you to help solve this problem by contributing compiler
-%passes that optimise real programs written in a standard non-toy
-%language effectively.
diff --git a/ghc/docs/add_to_compiler/stg-summary-fig.verb b/ghc/docs/add_to_compiler/stg-summary-fig.verb
deleted file mode 100644
index 99dad9cc03..0000000000
--- a/ghc/docs/add_to_compiler/stg-summary-fig.verb
+++ /dev/null
@@ -1,55 +0,0 @@
-\begin{figure} \fbox{
-$\begin{array}{lrcll}
-%\mbox{Program} & prog & \rightarrow & binds & \\
-%\\
-\mbox{Bindings} & binds & \rightarrow
- & bind_1 @;@ \ldots @;@~ bind_n & n \geq 1 \\
-& bind & \rightarrow & var ~@=@~ vars_f ~@\@ upd~ vars_a ~@->@~expr
- & \mbox{Closure} \\
- &&&& (vars_f = \freevars{expr} \setminus vars_a) \\
-\\
-\mbox{Update flag} & upd & \rightarrow & @u@ & \mbox{Updatable} \\
- && | & @n@ & \mbox{Not updatable} \\
-\\
-\mbox{Expression} & expr
- & \rightarrow & @let@~binds~@in@~ expr
- & \mbox{Local definition} \\
- && | & @letrec@~binds~@in@~expr
- & \mbox{Local recursive definition} \\
- && | & @case@~expr~@of@~alts
- & \mbox{Case expression} \\
- && | & var~vars & \mbox{Application}\\
- && | & con~vars
- & \mbox{Saturated constructor} \\
- && | & prim~vars
- & \mbox{Saturated primitive} \\
- && | & literal & \\
-\\
-
-\mbox{Alternatives} & alts & \rightarrow
- & calt_1@;@ \ldots @;@~calt_n@; default ->@~ expr
- & n \geq 0~\mbox{(Boxed)} \\
- && | & lalt_1@;@ \ldots @;@~lalt_n@;@~var ~@->@~ expr
- & n \geq 0~\mbox{(Unboxed)} \\
-\\
-\mbox{Constructor alt}
- & calt & \rightarrow & con~vars~@->@~expr & \\
-\mbox{Literal alt}
- & lalt & \rightarrow & literal~@->@~expr & \\
-\\
-\mbox{Literals} & literal
- & \rightarrow & integer & \\
- && | & \ldots & \\
-\\
-\mbox{Primitives} & prim
- & \rightarrow & @+@ ~|~ @-@ ~|~ @*@ ~|~ @/@ \\
- && | & \ldots & \\
-\\
-\mbox{Variable lists} & vars & \rightarrow &
- @[@var_1@,@ \ldots @,@~var_n@]@ & n \geq 0 \\
-\\
-\end{array}$
-}
-\caption{Syntax of the STG language}
-\label{fig:stg-syntax}
-\end{figure}
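-
-For readers more comfortable with Haskell data types than with
-grammars, the following is an illustrative transcription of the figure.
-It is not the compiler's {\tt stgSyn/} definition, merely a reading aid.
-\begin{verbatim}
-module MiniStg where
-
-type Var = String
-type Con = String
-
--- A binding is a closure:  f = {fvs} \upd {args} -> e
-data Binding = Bind Var [Var] UpdateFlag [Var] Expr
-
-data UpdateFlag = Updatable | NotUpdatable   -- 'u' / 'n' in the figure
-
-data Expr
-  = Let     [Binding] Expr
-  | LetRec  [Binding] Expr
-  | Case    Expr Alts
-  | App     Var [Var]             -- application of a variable to atoms
-  | ConApp  Con [Var]             -- saturated constructor
-  | PrimApp PrimOp [Var]          -- saturated primitive
-  | Lit     Literal
-
-data Alts
-  = BoxedAlts   [(Con, [Var], Expr)] Expr         -- ...; default -> e
-  | UnboxedAlts [(Literal, Expr)]    (Var, Expr)  -- ...; v -> e
-
-data PrimOp  = Add | Sub | Mul | Div
-data Literal = IntLit Integer
-\end{verbatim}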
diff --git a/ghc/docs/install_guide/Jmakefile b/ghc/docs/install_guide/Jmakefile
index 29b42b4ea6..bf16d96738 100644
--- a/ghc/docs/install_guide/Jmakefile
+++ b/ghc/docs/install_guide/Jmakefile
@@ -1,7 +1,3 @@
-LitStuffNeededHere(docs depend)
-InfoStuffNeededHere(docs)
-
-LiterateSuffixRules()
DocProcessingSuffixRules()
LitDocRootTarget(installing,lit)
diff --git a/ghc/docs/install_guide/installing.lit b/ghc/docs/install_guide/installing.lit
index 69e442726d..f184f52b61 100644
--- a/ghc/docs/install_guide/installing.lit
+++ b/ghc/docs/install_guide/installing.lit
@@ -1,18 +1,18 @@
%
-% $Header: /srv/cvs/cvs.haskell.org/fptools/ghc/docs/install_guide/Attic/installing.lit,v 1.3 1996/06/30 16:45:00 partain Exp $
+% $Header: /srv/cvs/cvs.haskell.org/fptools/ghc/docs/install_guide/Attic/installing.lit,v 1.4 1996/07/25 20:47:34 partain Exp $
%
\begin{onlystandalone}
\documentstyle[11pt,literate]{article}
\begin{document}
\title{Installing the Glasgow Functional Programming Tools\\
-Version~0.26}
-\author{The AQUA Team (scribe: Will Partain)\\
+Version~2.01}
+\author{The GHC Team\\
Department of Computing Science\\
University of Glasgow\\
Glasgow, Scotland\\
G12 8QQ\\
\\
-Email: glasgow-haskell-\{users,bugs\}-request\@dcs.glasgow.ac.uk}
+Email: glasgow-haskell-\{users,bugs\}-request\@dcs.gla.ac.uk}
\maketitle
\begin{rawlatex}
\tableofcontents
@@ -31,11 +31,6 @@ Glasgow functional-programming tools (the `Glasgow tools'), most often
just the Glasgow Haskell compiler (GHC). This document will guide you
through the installation process, and point out the known pitfalls.
-Note: As of version~0.26, this document describes how to build {\em
-all} of the Glasgow tools, not just the Haskell compiler. The
-\tr{configure} script, etc., has changed to cope with this wider
-mandate; something to bear in mind...
-
%************************************************************************
%* *
\subsection[install-strategy]{What to install? Starting from what?}
@@ -73,6 +68,10 @@ intermediate C (\tr{.hc}) files that we provide.
Building GHC on an unsupported platform falls into this category.
Please see \sectionref{booting-from-C}.
+NB: For GHC~2.01, bootstrapping from \tr{.hc} files means you will get
+an all-2.01 system---possibly unduly slow. Building with GHC~0.29
+will get you a faster compiler...
+
Once you have built GHC, you can build the other Glasgow tools with
it.
@@ -82,7 +81,7 @@ recommended, but see \sectionref{building-with-HBC}.
%************************************************************************
%* *
-\subsection[port-info]{What machines the Glasgow tools, version~0.26, run on}
+\subsection[port-info]{What machines the Glasgow tools, version~2.01, run on}
\index{ports, GHC}
\index{GHC ports}
\index{supported platforms}
@@ -113,19 +112,16 @@ supports the underlying BSDisms.
The GHC hierarchy of Porting Goodness: (a)~Best is a native-code
generator; (b)~next best is a ``registerised''
port; (c)~the bare minimum is an ``unregisterised'' port.
-``Unregisterised'' Haskell programs are much bigger and slower,
-but the port is much easier to get going.
-
-With GHC~0.26, we add ``registerised'' support for some HP-PA, iX86,
-and MIPS platforms.
+``Unregisterised'' is so terrible that we won't say more about it.
-We use Sun4s running SunOS~4.1.3 and DEC~Alphas running OSF/1~V2.0,
-so those are the ``fully-supported'' platforms, unsurprisingly. Both
-have native-code generators, for quicker compilations.
+We use Sun4s running SunOS~4.1.3 and Solaris 2.5, and DEC~Alphas
+running OSF/1~V2.0, so those are the ``fully-supported'' platforms,
+unsurprisingly. Both have native-code generators, for quicker
+compilations. The native-code generator for iX86 platforms (e.g.,
+Linux ELF) is {\em nearly} working; but is not turned on by default.
-Here's everything that's known about GHC ports, as of 0.26. We
-identify platforms by their ``canonical GNU-style'' names. We
-identify
+Here's everything that's known about GHC ports, as of 2.01. We
+identify platforms by their ``canonical GNU-style'' names.
Note that some ports are fussy about which GCC version you use; or
require GAS; or ...
@@ -153,33 +149,38 @@ libraries (see \sectionref{Pre-supposed} for details).
%-------------------------------------------------------------------
\item[HP-PA box running HP/UX 9.x:]
\index{hppa1.1-hp-hpux: registerised port}
-GHC~0.26 works registerised. No native-code generator.
+Works registerised. No native-code generator.
For GCC, you're best off with one of the Utah releases of
GCC~2.6.3 (`u3' or later), from \tr{jaguar.cs.utah.edu}.
-We don't know if straight GCC 2.7.x works or not.
+We think a straight GCC 2.7.x works, too.
Concurrent/Parallel Haskell probably don't work (yet).
\index{hppa1.1-hp-hpux: concurrent---no}
\index{hppa1.1-hp-hpux: parallel---no}
%-------------------------------------------------------------------
-\item[\tr{i386-*-linuxaout} (PCs running Linux---\tr{a.out} format):]
-\index{i386-*-linuxaout: registerised port}
-GHC~0.26 works registerised (no native-code generator).
+\item[\tr{i386-*-linux} (PCs running Linux---ELF format):]
+\index{i386-*-linux: registerised port}
+GHC~2.01 works registerised.
You {\em must} have GCC 2.7.x or later.
+The iX86 native-code generator is {\em nearly} there, but it
+isn't turned on by default.
-Concurrent/Parallel Haskell probably don't work (yet).
-\index{i386-*-linuxaout: concurrent---no}
-\index{i386-*-linuxaout: parallel---no}
-\index{i386-*-linuxaout: profiling---maybe}
-Profiling might work, but it is untested.
+Profiling works, and Concurrent Haskell works.
+\index{i386-*-linux: profiling---yes}
+\index{i386-*-linux: concurrent---yes}
+Parallel Haskell probably works.
+\index{i386-*-linux: parallel---maybe}
+
+On old Linux a.out systems: should be the same.
+\index{i386-*-linuxaout: registerised port}
%-------------------------------------------------------------------
\item[\tr{mips-sgi-irix5}:]
\index{mips-sgi-irix5: registerised port}
-GHC~0.26 works registerised (no native-code generator).
+GHC~2.01 works registerised (no native-code generator).
I suspect any GCC~2.6.x (or later) is OK. The GCC that I used
-was built with \tr{--with-gnu-as}.
+was built with \tr{--with-gnu-as}; turns out that is important!
Concurrent/Parallel Haskell probably don't work (yet).
Profiling might work, but it is untested.
@@ -211,7 +212,8 @@ Concurrent/Parallel Haskell probably won't work (yet).
%-------------------------------------------------------------------
\item[\tr{m68k-sun-sunos4} (Sun3):]
\index{m68k-sun-sunos4: registerised port}
-GHC~0.26 works registerised. No native-code generator.
+GHC~2.01 hasn't been tried on a Sun3. GHC~0.26 worked registerised.
+No native-code generator.
Concurrent/Parallel Haskell probably don't work (yet).
\index{m68k-sun-sunos4: concurrent---no}
@@ -254,6 +256,7 @@ All of the above are {\em estimates} of disk-space needs.
Use an appropriate machine, compilers, and things.
SPARC boxes and DEC Alphas running OSF/1 are fully supported.
+Linux, MIPS, and HP boxes are in pretty good shape.
\Sectionref{port-info} gives the full run-down on ports or lack
thereof.
@@ -266,11 +269,11 @@ probably need a reasonably up-to-date GCC (GNU C compiler),
too---\sectionref{port-info} lists any specific requirements in this
regard.
-If you are going to be making documents [unlikely], you'll need
-\tr{makeindex} as well, and maybe \tr{tgrind} [unlikely]. If you edit
-the one or two \tr{flex} files in GHC, you'll need \tr{flex}, too
-[unlikely].
-
+% If you are going to be making documents [unlikely], you'll need
+% \tr{makeindex} as well, and maybe \tr{tgrind} [unlikely]. If you edit
+% the one or two \tr{flex} files in GHC, you'll need \tr{flex}, too
+% [unlikely].
+%
If you end up yacc'ing the Haskell parser [unlikely], Sun's standard
\tr{/bin/yacc} won't cut it. Either the unbundled \tr{/usr/lang/yacc}
or \tr{bison} will do fine. Berkeley yacc (\tr{byacc}) won't do.
@@ -285,7 +288,7 @@ For GHC, please see the bug-reporting section of the User's guide
(separate document), to maximise the usefulness of your report.
If in doubt, please send a message to
-\tr{glasgow-haskell-bugs@dcs.glasgow.ac.uk}.
+\tr{glasgow-haskell-bugs@dcs.gla.ac.uk}.
\end{enumerate}
%************************************************************************
@@ -318,14 +321,14 @@ one bundle per \tr{.tar.gz} file.
A basic GHC ``bundle'' gives you the compiler and the standard,
sequential libraries. The files are called
-\tr{ghc-0.26-<platform>.tar.gz}, where \tr{<platform>} is one of:
-alpha-dec-osf2, hppa1.1-hp-hpux9, i386-unknown-linuxaout,
-% i386-unknown-solaris2,
+\tr{ghc-2.01-<platform>.tar.gz}, where \tr{<platform>} is one of:
+alpha-dec-osf2, hppa1.1-hp-hpux9, i386-unknown-linux,
+i386-unknown-solaris2, i386-unknown-freebsd,
m68k-sun-sunos4, mips-sgi-irix5,
sparc-sun-sunos4, sparc-sun-solaris2.
There are plenty of ``non-basic'' GHC bundles. The files for them are
-called \tr{ghc-0.26-<bundle>-<platform>.tar.gz}, where the
+called \tr{ghc-2.01-<bundle>-<platform>.tar.gz}, where the
\tr{<platform>} is as above, and \tr{<bundle>} is one of these:
\begin{description}
\item[\tr{prof}:] Profiling with cost-centres. You probably want this.
@@ -353,9 +356,9 @@ unpack them all together in the same place, thusly:
\begin{verbatim}
cd /put/them/in/here
-gunzip < ghc-0.26-sparc-sun-sunos4.tar.gz | tar xf -
-gunzip < ghc-0.26-prof-sparc-sun-sunos4.tar.gz | tar xf -
-gunzip < ghc-0.26-conc-sparc-sun-sunos4.tar.gz | tar xf -
+gunzip < ghc-2.01-sparc-sun-sunos4.tar.gz | tar xf -
+gunzip < ghc-2.01-prof-sparc-sun-sunos4.tar.gz | tar xf -
+gunzip < ghc-2.01-conc-sparc-sun-sunos4.tar.gz | tar xf -
\end{verbatim}
If you unpacked the files in a way that does {\em not} preserve
@@ -374,7 +377,7 @@ Here's what to do with the stuff in each directory, once unpacked.
\begin{description}
%---------------------------------------------------------------------
\item[\tr{bin/<platform>} (sometimes just \tr{bin/}):]
-Copy these executables so that they will be in users' PATHs.
+Copy (or link to) these executables so that they will be in users' PATHs.
%---------------------------------------------------------------------
\item[\tr{lib}:]
@@ -394,7 +397,6 @@ Things you need to fiddle so the tools will spring to life:
bin directory.
\item
-CHOICE \#1 (BETTER):
Edit your \tr{ghc}, \tr{mkdependHS}, and \tr{hstags} scripts:
(a)~Create a correct \tr{#!...perl} first line in each one. (Ask a
Unix-friendly person to help you, if you don't know what a
@@ -409,17 +411,22 @@ So, if your ``lib'' files are now in \tr{/home/myself/lib/ghc/...},
then you should set \tr{GLASGOW_HASKELL_ROOT} to \tr{/home/myself}.
\item
-CHOICE \#2:
-Set your \tr{GLASGOW_HASKELL_ROOT} environment variable, and
-don't edit the \tr{ghc}, \tr{mkdependHS}, and \tr{hstags} scripts
-at all.
-
-It's better to edit the scripts; that way, it's once for all.
+Actually setting the \tr{GLASGOW_HASKELL_ROOT} environment variable
+is a {\em bad} idea, mostly because it precludes having several
+GHC versions around at the same time.
+
+% \item
+% CHOICE \#2:
+% Set your \tr{GLASGOW_HASKELL_ROOT} environment variable, and
+% don't edit the \tr{ghc}, \tr{mkdependHS}, and \tr{hstags} scripts
+% at all.
+%
+% It's better to edit the scripts; that way, it's once for all.
\item
You {\em may} need to re-\tr{ranlib} your libraries (on Sun4s).
\begin{verbatim}
-% cd <wherever-the-lib-files-are-now>/ghc/0.26/sparc-sun-sunos4
+% cd <wherever-the-lib-files-are-now>/ghc/2.01/sparc-sun-sunos4
% foreach i ( `find . -name '*.a' -print` ) # or other-shell equiv...
? ranlib $i
? # or, on some machines: ar s $i
@@ -502,8 +509,7 @@ things you want to have anyway. Please see \sectionref{Pre-supposed}.
\item
Be sure you have a suitable Haskell compiler, or else the intermediate
-C (\tr{.hc}) files. In some cases, you might want an alternative set
-of interface (\tr{.hi}) files (quicker than generating a fresh set).
+C (\tr{.hc}) files.
\Sectionref{install-strategy} lists the various strategies you might
adopt.
@@ -524,7 +530,12 @@ find . -type f \! -name \*.hi \! -name \*.hc \! -name \*.jm -print \
Run the \tr{configure} script. It is a shell script that looks around
to find out things about your system. You can see the \tr{configure}
options by passing it a \tr{--help} flag, or by reading
-\sectionref{Configuring}.
+\sectionref{Configuring}. A typical invocation might be:
+\begin{verbatim}
+% cd <the-very-top-dir>
+% ./configure --prefix=/usr/local/fp \
+ --with-hc=ghc-0.29 --with-mkdependHS=mkdependHS-0.29
+\end{verbatim}
\item
Once configured, build the basic support utilities and make your
@@ -606,21 +617,20 @@ Because the compiler heart of Glorious Glasgow Haskell is written in
Haskell, you have to use some ``bootstrapping'' mechanism.
Your best choice, if available, is to use a binary distribution for
-your platform; i.e., compile GHC~0.26 with a GHC~0.26 that we have
+your platform; e.g., compile GHC~2.01 with a GHC~0.29 that we have
provided. Please see \sectionref{installing-bin-distrib} for how to
install a binary distribution.
-Your remaining choice is to use the intermediate C (\tr{.hc}) files that we
-supply. This is the {\em only} choice for anyone trying to port to
-a new or weakly-supported system.
+Your remaining choice is to use the intermediate C (\tr{.hc}) files
+that we supply. This is the {\em only} choice for anyone trying to
+port to a new or weakly-supported system.
The main drawback of the supplied-\tr{.hc} approach is that you will
have a lot of very bulky intermediate files on your disk for a while.
-One obscure note: if you're going to build GHC to have a native-code
-generator, you're well advised to get a suitable set of interface
-files (to save making them yourself). Please see \sectionref{Compiler_reconfig}
-if you plan to end up with a native-code generator.
+(With GHC~2.01, another drawback is that the \tr{.hc} files will give
+you a 2.01-built-with-2.01---normally a good thing---but, in this case,
+probably slower than a 2.01-built-with-0.29.)
% If you have to boot from C (\tr{.hc}) files, you should follow the
% directions in \sectionref{booting-from-C}.
@@ -680,6 +690,8 @@ Do the main GHC build, just as \tr{STARTUP} suggests:
\begin{verbatim}
% cd ghc
% make all >& make.log
+% cd ../hslibs
+% make all >& make.log
\end{verbatim}
If this fails or something seems suspicious, check the ``known
pitfalls'' (\sectionref{build-pitfalls}). If you can't figure out how
@@ -707,14 +719,14 @@ main = putStr "Hello, world!\n"
First, give yourself a convenient way to execute the driver script
\tr{ghc/driver/ghc}, perhaps something like...
\begin{verbatim}
-% ln -s /local/src/ghc-0.26/ghc/driver/ghc ~/bin/sun4/ghc
+% ln -s /local/src/ghc-2.01/ghc/driver/ghc ~/bin/alpha/ghc
% rehash
\end{verbatim}
Compile the program, using the \tr{-v} (verbose) flag to verify that
libraries, etc., are being found properly:
\begin{verbatim}
-% ghc -v -o hello -fhaskell-1.3 Main.hs
+% ghc -v -o hello Main.hs
\end{verbatim}
Now run it:
@@ -753,6 +765,8 @@ Assuming that everything's OK so far, all you need to do is:
\begin{verbatim}
% cd <very-top>/ghc
% make install
+% cd <very-top>/hslibs
+% make install
\end{verbatim}
If you're a little dubious (as I usually am), you can always do a
@@ -760,6 +774,8 @@ If you're a little dubious (as I usually am), you can always do a
\begin{verbatim}
% cd <very-top>/ghc
% make -n install >& temp-log-file-to-look-at
+% cd <very-top>/hslibs
+% make -n install >& temp-log-file-to-look-at
\end{verbatim}
In both cases, if something breaks, it's a {\em bug}.
@@ -774,7 +790,7 @@ In both cases, if something breaks, it's a {\em bug}.
%* *
%************************************************************************
-Because our documentation is in DVI/Info formats, and because there is
+Because our documentation is in DVI/Info/HTML formats, and because there is
no standard practice about how such documents are ``installed,'' we
haven't tried to automate this (at least not enough that we promise it
works).
@@ -851,10 +867,13 @@ of GHC first.
If you want to execute out of the source tree but would like to clear
off lots and lots of stuff, you can do:
\begin{verbatim}
-% cd ghc/lib # scrub library C and object files
-% rm */*.hc
+% cd ghc/lib # scrub library .hc and object files
+% rm */*.hc */*.*_hc
% find . -name '*.o' -print | xargs /bin/rm
+% cd hslibs/ # ditto for syslibs
+% rm */src/*.hc */src/*.*_hc
+
% cd ghc/compiler # scrub compiler object files
% rm */*.o
% rm */*.hc # if you have been keeping them around
@@ -879,17 +898,17 @@ because no binaries have been provided, or because the machine
is not ``fully supported.''
To boot from C (\tr{.hc}) files, you need the regular source distribution
-(\tr{ghc-0.26-src.tar.gz}) and also some extra files in
-\tr{ghc-0.26-hc-files.tar.gz}. DON'T FORGET any extra \tr{.hc}
+(\tr{ghc-2.01-src.tar.gz}) and also some extra files in
+\tr{ghc-2.01-hc-files.tar.gz}. DON'T FORGET any extra \tr{.hc}
files for profiling, concurrent, parallel, ...
Whatever you want to build, just unpack all the files ``together'':
\begin{verbatim}
% cd <wherever>
-% gunzip -c ghc-0.26-src.tar.gz | tar xf -
-% gunzip -c ghc-0.26-hc-files.tar.gz | tar xf - # basic...
-% gunzip -c ghc-0.26-prof-hc-files.tar.gz | tar xf - # profiling...
-% gunzip -c ghc-0.26-conc-hc-files.tar.gz | tar xf - # concurrent...
+% gunzip -c ghc-2.01-src.tar.gz | tar xf -
+% gunzip -c ghc-2.01-hc-files.tar.gz | tar xf - # basic...
+% gunzip -c ghc-2.01-prof-hc-files.tar.gz | tar xf - # profiling...
+% gunzip -c ghc-2.01-conc-hc-files.tar.gz | tar xf - # concurrent...
... etc ...
\end{verbatim}
@@ -1028,6 +1047,9 @@ especially, on a known machine. Also, it can take a VERY long time
(esp. on oldish machines), so it's good to run overnight, on a quiet
machine, nice'd, etc., etc.
+You will probably continue by building the system libraries:
+\tr{cd hslibs; make all}...
+
When it's all built, test your alleged GHC system, as suggested in
\sectionref{GHC_test}.
@@ -1435,30 +1457,19 @@ an initial boot are: (a)~to get a native-code generator, or (b)~if you
are going to hack on GHC.
The reason you must rebuild to get a native-code generator: The
-\tr{.hc} files will {\em not} turn into a native-code generator, and
-the distributed \tr{.hi} files ``match'' those \tr{.hc} files.
+\tr{.hc} files that we supply do {\em not} include a native-code generator.
+(They are supposed to work on and for any machine.)
From here on, I presume you've installed your booted GHC as
-\tr{ghc-0.26}.
-
-If you are going for a native-code generator, you can save yourself
-some re-compiling by getting a suitable set of interface (\tr{.hi})
-files, for GHC for your machine. You should end up doing, for example:
-\begin{verbatim}
-cd ghc-0.26/ghc/compiler # note where you are!
-
-rm */*.o # scrub the old compiler files
-
-gunzip -c ghc-0.26-hi-files-alpha.tar.gz | tar xfv -
-\end{verbatim}
+\tr{ghc-2.01}.
-Now you can configure as before, but using \tr{--with-hc=ghc-0.26}
+You can configure as before, but using \tr{--with-hc=ghc-2.01}
(\tr{config.status} records what you did before).
Running \tr{sh < STARTUP} isn't strictly necessary; you only need to
rebuild in \tr{ghc/compiler}:
\begin{verbatim}
-cd ghc-0.26/ghc/compiler
+cd ghc-2.01/ghc/compiler
make Makefile # if you didn't STARTUP...
make all EXTRA_HC_OPTS=-fvia-C # -fvia-C important!
@@ -1477,14 +1488,22 @@ type \tr{make install} in \tr{ghc/compiler} to finish the job.
%* *
%************************************************************************
-GHC~0.26 doesn't build with HBC. (It could, but we haven't put in
-the effort to maintain it.)
+GHC~2.01 doesn't build with HBC.
-GHC~0.26 is best built with itself, GHC~0.26. We heartily recommend
-it. GHC~0.26 can certainly be built with GHC~0.23 or 0.24, and with
-some earlier versions, with some effort.
+GHC~2.01 can be built with:
+\begin{description}
+\item[GHC~0.26:]
+Provided you don't use \tr{-O} (i.e., configure with
+\tr{--disable-hsc-optimised})...
+
+\item[GHC~0.29:]
+Works fine, and builds the fastest compiler; but be sure to configure
+with \tr{--with-mkdependHS=blah}, where \tr{blah} is a name to invoke
+the \tr{mkdependHS} that comes with GHC~0.29.
-GHC has never been built with compilers other than GHC and HBC.
+\item[Itself:]
+It works, but the resulting compiler is slower.
+\end{description}
%$$ If you are going to build the compiler with HBC,
%$$ please get the appropriate set of \tr{.hi} interface
@@ -1513,8 +1532,9 @@ GHC has never been built with compilers other than GHC and HBC.
%************************************************************************
Here are the gory details about some utility programs you may need;
-\tr{perl} and \tr{gcc} are the only important ones. (PVM is important if you're going for Parallel Haskell.) The
-\tr{configure} script will tell you if you are missing something.
+\tr{perl} and \tr{gcc} are the only important ones. (PVM is important
+if you're going for Parallel Haskell.) The \tr{configure} script will
+tell you if you are missing something.
\begin{description}
\item[Perl:]
@@ -1524,24 +1544,17 @@ Here are the gory details about some utility programs you may need;
for doing shell-scripty tasks that involve lots of text processing.
It is pretty easy to install.
-(We still assume Perl version 4; experience suggests that Perl~5
-is fine, too.)
+(Perl~5 is the current version; GHC is still friendly to Perl~4 as well.)
Perl should be put somewhere so that it can be invoked by the \tr{#!}
script-invoking mechanism. (I believe \tr{/usr/bin/perl} is preferred;
we use \tr{/usr/local/bin/perl} at Glasgow.) The full pathname should
be less than 32 characters long.
-Perl version 4.035 has a bug to do with recursion that will bite if
-you run the \tr{lit2texi} script, when making Info files from
-``literate'' files of various sorts. Either use a more recent version
-(4.036, or 5.00n) or an older version
-(e.g., perl 4.019).
-
\item[GNU C (\tr{gcc}):]
\index{pre-supposed: GCC (GNU C compiler)}
\index{GCC (GNU C compiler), pre-supposed}
-The current version is 2.7.0, and has no problems that we know of.
+The current version is 2.7.2, and has no problems that we know of.
If your GCC dies with ``internal error'' on some GHC source file,
please let us know, so we can report it and get things improved.
@@ -1555,7 +1568,7 @@ PVM is the Parallel Virtual Machine on which Parallel Haskell programs
run. Underneath PVM, you can have (for example) a network of
workstations (slow) or a multiprocessor box (faster).
-The current version of PVM is 3.3.7. It is readily available on
+The current version of PVM is 3.3.11; we use 3.3.7. It is readily available on
the net; I think I got it from \tr{research.att.com}, in \tr{netlib}.
A PVM installation is slightly quirky, but easy to do. Just follow
@@ -1655,7 +1668,7 @@ these are OK.
%------------------------------------------------------------------------
\item
-In 0.26, when compiling via C, you'll sometimes get ``warning:
+When compiling via C, you'll sometimes get ``warning:
assignment from incompatible pointer type'' out of GCC. Harmless.
%------------------------------------------------------------------------
@@ -1937,6 +1950,25 @@ compiler, please compile from intermediate C files (produced by GHC
somewhere else).''
%--------------------------------------------------------------
+\item[\tr{--with-mkdependHS=}{\em mkdep}:]
+\index{--with-mkdependHS configure option}
+
+Use {\em mkdep} as your \tr{mkdependHS} program. You should use the
+\tr{mkdependHS} that came with the GHC which you are probably
+specifying via \tr{--with-hc=...}.
+
+%--------------------------------------------------------------
+\item[\tr{--with-gcc=}{\em blah}:]
+\index{--with-gcc configure option}
+Use {\em blah} as my ``GNU C compiler.'' In case you have several,
+and want to choose a particular one.
+
+%--------------------------------------------------------------
+\item[\tr{--with-make=}{\em blub}:]
+\index{--with-make configure option}
+Ditto, for ``make''.
+
+%--------------------------------------------------------------
\item[\tr{--with-tmpdir=}{\em directory}:]
Set the directory where temporary files should be created. This is
\tr{/tmp} by default, which is Sometimes Uncool (because, e.g.,
@@ -1970,16 +2002,14 @@ Some common combinations would be:
./configure --prefix=/users/fp/partain --with-hc=c --disable-profiling
# use .hc files; don't bother with profiling
-./configure --with-hc=ghc-0.26 --with-readline-library --with-sockets-library
- # simple build with itself; for Sun4s & Alphas, you
- # should grab & use ghc-0.26-hi-files-<blah>.tar.gz
- # (because those machines have a native-code generator).
+./configure --with-hc=ghc-0.29 --with-readline-library --with-sockets-library
+ # simple build with 0.29
# For the extra libraries, you've got to have the right
# stuff to link to.
-./configure --with-hc=ghc-0.26 --disable-hsc-optimised --enable-hsc-debug
+./configure --with-hc=ghc-0.29 --disable-hsc-optimised --enable-hsc-debug
# Don't use -O on GHC itself; turn on -DDEBUG.
- # Slows things way down, but it's the right thing if
+ # Slows things down, but it's The Right Thing if
# you're hacking on GHC and doing lots of recompilations.
./configure --with-hc=c --enable-concurrent --enable-parallel --with-tmpdir=/usr/tmp
@@ -1993,6 +2023,17 @@ do {\em not} have a native-code generator.
%************************************************************************
%* *
+\subsection[Configuring-HsLibs]{Haskell-libraries-specific things in \tr{configure}}
+\index{Configuring the Haskell libraries}
+%* *
+%************************************************************************
+
+The normal thing is: \tr{--enable-hslibs --with-hc-for-hslibs=in-place}.
+
+NOT DONE YET.
+
+%************************************************************************
+%* *
\subsection[Configuring-Haggis]{Haggis-specific things in \tr{configure}}
\index{Configuring for Haggis}
%* *
@@ -2022,9 +2063,12 @@ MORE TO COME.
%* *
%************************************************************************
-Use \tr{--enable-nofib}. If you have NoFib and GHC in the same build
+Use \tr{--enable-nofib --with-setup=ghc}.
+If you have NoFib and GHC in the same build
tree but only want to build the NoFib suite, use \tr{--disable-ghc}.
+(If you were testing HBC on NoFib, you'd do \tr{--with-setup=hbc}, of course.)
+
You may want to enable or disable various sets of tests, as
suggested by \tr{./configure --help}. If you use \tr{--enable-all-tests},
be aware that many of them are GHC-specific. Also, we may not have
@@ -2110,18 +2154,18 @@ ghc/compiler/ & The Haskell compiler proper, called \tr{hsc}; \\
& \\
ghc/runtime/ & The runtime system, including the garbage-collector(s).\\
& \\
-ghc/lib/prelude/& Source for the linked-in code for the ``standard prelude''. \\
-ghc/lib/glaExts/ & Source for the linked-in code for our Glasgow extensions. \\
-ghc/lib/haskell-1.3/ & Source for the linked-in code for Haskell 1.3 I/O. \\
-ghc/lib/hbc/ & Source for the HBC `system library'. \\
-ghc/lib/ghc/ & Source for the GHC `system library'.\\
- & \\
-ghc/includes/ & The ``public'' .hi files slurped by the parser, \\
- & and .h files \tr{#include}d in generated .hc files come from.\\
+ghc/lib/ & Source for the linked-in code for the ``standard prelude''. \\
+ghc/includes/ & The .h files \tr{#include}d in generated .hc files.\\
& \\
ghc/docs/ & documents; see the README file there. \\
& \\
ghc/CONTRIB/ & reserved for contributed things \\
+ & \\
+hslibs/ghc/ & `ghc' system library (syslib) \\
+hslibs/hbc/ & `hbc' system library \\
+hslibs/posix/ & `posix' system library \\
+hslibs/contrib/ & `contrib' system library \\
+ & \\
haggis/ & Haggis Haskell X11 GUI toolkit \\
happy/ & Happy Haskell parser generator \\
nofib/ & NoFib Haskell benchmark and test suite \\
diff --git a/ghc/docs/release_notes/0-02-notes.lit b/ghc/docs/release_notes/0-02-notes.lit
deleted file mode 100644
index 3d4e69cab7..0000000000
--- a/ghc/docs/release_notes/0-02-notes.lit
+++ /dev/null
@@ -1,230 +0,0 @@
-\section[0-02-limitations]{Limitations of Glasgow \Haskell{}, release~0.02}
-
-[Scribe for the 0.02 notes: Cordy Hall.]
-
-These are the current major limitations of release~0.02,
-and a way to get around each if there is one.
-
-\begin{enumerate}
-\item
-{\em Doesn't yet track version~1.1 of the \Haskell{} Report.}
-If you are lucky, sections might work anyway...
-\item
-{\em No automatic importation of Prelude.} You can add an import of
-module @MiniPrel@, which is in \tr{lib/prelude/MiniPrel.hi}, and
-extend your own version of this file as you wish as long as you do not
-add anything currently built into the compiler. The following are
-already built in (see \tr{compiler/typecheck/PrelCoreEnv.lhs}):
-\begin{itemize}
-\item
-the boolean data type
-\item
-the string data type
-\item
-the primitive Haskell types, @Int@, @Char@, @Integer@, @Float@, @Double@
-\item
-function type
-\item
-list type
-\item
-tuple type (up to and including 5 tuples)
-\item
-random unboxed types (@IntPrim@, @StringPrim@, etc.)
-\item
-class @Eq@ with all operations, and the following instances:
-@Integer@, @Int@, @Rational@, @List@, @Char@, 2 tuple
-\item
-class @Ord@ with all operations, and the following instances:
-@Integer@, @Int@
-\item
-class @Num@ with all operations, and the following instances:
-@Integer@, @Int@, @Rational@
-\item
-class @Fractional@ with all operations, and the following instances:
-@Integer@, @Rational@
-\item
-cons @(:)@ and @[]@
-\end{itemize}
-\item
-{\em No renaming}
-\item
-{\em No selective export}
-\item
-{\em No re-export of imported entities}
-\item
-{\em No contexts in data declarations}
-\item
-{\em No ambiguity resolution for numeric types}
-\item
-{\em No overloaded numeric patterns or @n+k@ patterns}
-\item
-{\em No deriving clause on data type declarations.} You can get around this
-by creating explicit instances for the data type. For example, if you wanted
-to derive @Eq@ for the data type
-
-\begin{verbatim}
-data T a = D (B a) | C
-data B b = X | Y b
-\end{verbatim}
-
-then you would write
-
-\begin{verbatim}
-import MiniPrel
-
-data T a = D (B a) | C
-data B b = X | Y b
-
-instance (Eq a) => Eq (T a) where
- (D x) == (D y) = x == y
- C == C = True
- a == b = False
-
- a /= b = not (a == b)
-
-instance (Eq b) => Eq (B b) where
- X == X = True
- (Y a) == (Y b) = a == b
- a == b = False
-
- a /= b = not (a == b)
-\end{verbatim}
-
-The reason that @MiniPrel@ is imported is that it provides a type for @not@.
-\item
-{\em No default methods in class declarations}
-\end{enumerate}
-
-So far, operations on @Int@s will have code generated for them
-but more work needs to be done to handle other types
-in the code generator. However, all of the above should be handled by the
-typechecker.
-
-Other limitations:
-\begin{itemize}
-\item
-Error messages need lots of improvement.
-\item
-The generated code is inefficient, and it takes a long time to generate it.
-\item
-Documentation is minimal.
-\item
-The only programs that compile and run are those for which @main@ has
-type @Int@!!! Examples in \tr{compiler/tests/codeGen}...
-\end{itemize}
-
-\section[0-02-new-tools]{New Tools}
-
-Programs with type errors can be difficult to correct
-without some help. Unfortunately, providing this help is a difficult research
-problem. Wand's recent POPL paper suggests an interesting approach, but it
-costs overhead even when the code typechecks. Instead, we've taken the
-following approach:
-
-\begin{enumerate}
-\item
-People who program in functional languages like interpreters because they
- can find out how a small function behaves, and then deduce how it will
- behave in a larger context.
-
-\item
- Type checking is rather like debugging, so it would be nice to give the user
- something that would allow probing of function and expression types
- within the context of the rest of the program.
-
-\item
- To do this, we allow the user to attach a special type variable as a
- signature to any function definition or expression of interest. The
- typechecker can then textually substitute the type of that expression for
- the signature, and print out the original program.
-\end{enumerate}
-
-For example, in the following program
-
-\begin{verbatim}
-f x = ((g :: tyreq1) 'a',(g :: tyreq2) True)
- where
- g x = x
-\end{verbatim}
-
-the type variables @tyreq1@ and @tyreq2@ are seen as special by the compiler.
-The program printed out is:
-
-\begin{verbatim}
-f x = ((g :: Char -> Char) 'a',(g :: Bool -> Bool) True)
- where
- g x = x
-\end{verbatim}
-
-If the program was instead
-
-\begin{verbatim}
-f x = (g 'a', True)
- where
- g :: tyreq1
- g x = x
-\end{verbatim}
-
-then the program printed out would be
-
-\begin{verbatim}
-f x = (g 'a', g True)
- where
- g :: a -> a
- g x = x
-\end{verbatim}
-
-A note about these `special type variables'---the user must guarantee
-(at present) that each is unique, and that each
-begins with the string @tyreq@.
-
-At present, the typechecker fails whenever there is a type error. Once
-it can be made to succeed on errors, handing control to something
-which can deal with this situation, then it will be easy to get some
-idea of what the typechecker thought about interesting expressions in
-the code, even though these types may not yet be fully defined. {\em For
-now, this tool is really only useful if you have a program that does
-typecheck (avoiding failure) but you wish to examine some of the types
-of the program's expressions.}
-
-To use this feature, the compiler must be built using the
-\tr{-DTYPE_ERROR_HELP}
-flag (change \tr{compiler/Jmakefile} in the appropriate place). When
-invoking the driver \tr{driver/ghc}, use the \tr{-ddump-type-error}
-flag.
-
-If you do use this and/or have any comments to make, please email to
-cvh\@dcs.glasgow.ac.uk.
-
-\section[0-02-instabilities]{Known instabilities in the compiler internals}
-
-Here are some things we know we are going to change in the internals
-of the compiler. Fellow developers may want to query these if they
-think that they may be adversely affected.
-
-\begin{enumerate}
-\item
-A modest revision to the basic data types for variables, constructors,
-and type variables (currently in \tr{compiler/names/{Local,Global}.lhs}
-and in \tr{compiler/absSyntax/UniType.lhs}). See
-\tr{compiler/names/Id.lhs} for our notes on what to do.
-
-\item
-A major overhaul of the pretty-printing and error-reporting machinery.
-
-\item
-A major overhaul of the ``make world'' machinery. Ideas welcome.
-
-\item
-A fairly significant module-renaming exercise. A proposal, not yet
-agreed, is in \tr{docs/FILE-RENAMING}.
-\end{enumerate}
-
-\section[0-02-other-problems]{Other known problems in the distribution}
-
-The GNU Info-file reader (\tr{literate/info-utils/info.c}) is an
-unusually buggy version, for some reason.
-
-The tests files have been stripped out of this release, merely to
-reduce the sheer volume of the distribution. Let us know if you want
-the test files.
diff --git a/ghc/docs/release_notes/0-03-README b/ghc/docs/release_notes/0-03-README
deleted file mode 100644
index 516e449a3a..0000000000
--- a/ghc/docs/release_notes/0-03-README
+++ /dev/null
@@ -1,47 +0,0 @@
-Version 0.03 of the new Glasgow Haskell compiler was an unannounced
-(exceedingly unpolished) release for our friends at York.
-
--------------------------------------
-
-A quick list of things to note:
-
-* Significantly different parser (parsers/hsp/) and reader
- (compiler/reader/), to do Haskell 1.1 syntax. The abstract syntax
- (compiler/abstractSyn/) now covers the entire Haskell language.
-
-* Compiler files have been majorly shuffled, renamed, in part to
- ensure filenames are <= 14 chars. Another such catastrophic
- re-shuffle is unlikely for the foreseeable future.
-
- The file docs/FILE-RENAMING is a fairly complete list of
- what-changed-to-what.
-
-* Pretty-printing machinery (compiler/utils/{Pretty,Outputable}.lhs)
- is completely changed.
-
-* Identifiers are now "Ids" (compiler/basicTypes/Id.lhs), not "Locals"
- or "Globals".
-
-* What-the-compiler-must-know-about-the-prelude (compiler/prelude) is
- believed to be complete; this includes Chars, Ints, Floats, and
- Doubles as primitives.
-
- (Stuff for Integers and Rationals is NOT all there.)
-
- All other prelude stuff should be specifiable w/ an ordinary
- interface file (notably lib/prelude/MiniPrel.hi).
-
-* The compiler does full-blown overloading of expressions and
- patterns. Yell if this really won't do -- perhaps a build-time
- option to override?
-
-* All flavours of patterns and expressions (e.g., n+k patterns,
- arithmetic sequences) are in; testing in some cases near zero.
-
-* BUGS INEVITABLE, MAJOR BUGS ENTIRELY POSSIBLE, PATCHES WILL PROBABLY
- BE REQUIRED. Don't panic, report promptly!
-
-* "main"'s (main/Main.lhs) handling of command-line options [and the
- feeding of same by the driver, driver/ghc.lprl] has been revised.
-
-* Documentation has _not_ been updated.
diff --git a/ghc/docs/release_notes/0-04-README b/ghc/docs/release_notes/0-04-README
deleted file mode 100644
index 14be2b0efd..0000000000
--- a/ghc/docs/release_notes/0-04-README
+++ /dev/null
@@ -1,15 +0,0 @@
-Version 0.04 of the new Glasgow Haskell compiler was yet another
-unannounced release for our friends at York (and elsewhere).
-
-----------------------------------------------------------------
-91/11/01:
-
-2 notes: [1] "main" should no longer have the non-std type "Int"; instead,
-it should have the non-std type "IOPrim"! (We're creeping towards
-real Haskell I/O.) compiler/tests/codeGen/cg001/Main.hs is a New
-Improved "main", I believe. docs/io-design may also be useful.
-[2] The old "import MiniPrel" trick has changed (and will change
-again). Because we're in the middle of trying to get full/original
-names going, you have to use one or more MODULE-SPECIFIC
-"MiniPrel*.hi" files; they're in lib/prelude.
-
diff --git a/ghc/docs/release_notes/0-05-notes.lit b/ghc/docs/release_notes/0-05-notes.lit
deleted file mode 100644
index 3f42108127..0000000000
--- a/ghc/docs/release_notes/0-05-notes.lit
+++ /dev/null
@@ -1,86 +0,0 @@
-\begin{description}
-%-------------------------------------------------------------------
-\item[1.1 syntax:]
-Does \Haskell{} version~1.1 syntax. The code for the parser
-(\tr{parsers/hsp/}) has been tidied up quite a bit [nice job, Kevin].
-
-%-------------------------------------------------------------------
-\item[Expressions and patterns:]
-All forms of expressions and patterns work, including overloaded
-patterns and @n+k@ patterns.
-
-%-------------------------------------------------------------------
-\item[A big part of the standard prelude is operational:]
-These parts (in \tr{lib/prelude}) have been compiled with the new
-compiler, and programs compiled with the new compiler can be linked to
-it.
-
-With the exceptions of (a)~hooking in the standard Haskell I/O system
-(see next item) and (b)~special pleading for constant-time-access
-arrays (or other magical features), all Prelude stuff is either done
-or easily do-able.
-
-%-------------------------------------------------------------------
-\item[Simple resolution of ambiguous overloading of numeric types:]
-(As per Haskell report, section~4.3.4). @default@ declarations do
-{\em NOT} work; however, the ``default default''
-(@default (Int, Double)@) is wired in. This should clear up nearly
-all annoying ``ambiguous dictionary'' errors.
-
-%-------------------------------------------------------------------
-\item[Better non-standard I/O:]
-We have implemented the bare bones of the I/O described in
-\tr{docs/io-design/}. It's not standard \Haskell{} I/O
-(we haven't yet implemented the impedance-matcher discussed in the
-doc), and it's not the same as what was there in 0.02. However, you
-can now write a simple reads-stdin/writes-stdout program:
-
-\begin{verbatim}
-#include "GhcPrelude.h"
-
-main = readString `thenIO` ( \ s ->
- writeString (my_String_to_String_manglification s) )
-\end{verbatim}
-
-The implementation of @error@ (via @sysError@) is also as described in
-the I/O document.
-
-%-------------------------------------------------------------------
-\item[Faster compiler:]
-The compiler is faster than version~0.02---we promise---but the
-significant tuning work is not yet done. We will do it after The
-Mangler (renamer) is in.
-
-%-------------------------------------------------------------------
-\item[Run compiled code on a Sun4:]
-If you compile your program to C (\tr{.hc} files), with, e.g.:
-
-\begin{verbatim}
-% glhc -C Foo.hs
-\end{verbatim}
-
-then you compile the output on a Sun4 with:
-
-\begin{verbatim}
-% glhc -c Foo.hc
-\end{verbatim}
-
-and, if you have the right things to link to, you can link with:
-
-\begin{verbatim}
-% glhc -o foo Foo.o
-\end{verbatim}
-
-The ``right things to link to'' include: the runtime system (
-\tr{cd runtimes/standard; make} on a sun4), and the standard libraries
-(\tr{cd lib; make all} on a sun4).
-
-We have not yet tried to make this work for Every Known Unix Box In
-The Universe. (But we plan to, with your help :-)
-
-%-------------------------------------------------------------------
-\item[Upheaval during FPCA:]
-As advertised with 0.02: Files moved around, modules and data types
-were renamed, and a generally Much Cleaner World now exists. We have
-no plans to do more of the same (at least for the compiler proper).
-\end{description}
diff --git a/ghc/docs/release_notes/0-06-notes.lit b/ghc/docs/release_notes/0-06-notes.lit
deleted file mode 100644
index e91ceac99b..0000000000
--- a/ghc/docs/release_notes/0-06-notes.lit
+++ /dev/null
@@ -1,266 +0,0 @@
-The really new thing about release 0.06 is this: if you can get
-your program through the compiler, then it should actually work when you
-run it!
-
-Another thing we have worked hard on recently is {\em documentation} (see
-next section).
-
-%************************************************************************
-%* *
-\subsection[0-06-new-docs]{New documentation, especially for hackers!}
-%* *
-%************************************************************************
-
-See the file \tr{docs/README} for a full ``roadmap'' to all known
-documentation.
-
-\begin{description}
-%-------------------------------------------------------------------
-\item[STG-machine paper.]
-A monster (70+-page) paper which gives a detailed description of the
-Spineless Tagless G-machine, on which the back end of the compiler is based.
-Simon is Jolly Proud of this paper.
-
-This paper isn't in the distribution, but is available by asking
-Simon, or by FTP from \tr{ftp.dcs.glasgow.ac.uk:pub/glasgow-fp/stg.dvi}.
-%-------------------------------------------------------------------
-\item[\tr{imports/SMinterface.lh}.]
-The storage manager is carefully isolated from the rest of the runtime
-system behind a clearly identified interface. This paper documents
-the interface, and says a little about the implementation. NB:
-``literate'' \tr{.h} file!
-%-------------------------------------------------------------------
-\item[\tr{docs/C_optimisation}.]
-Lots of details about how we use C as a target language, and
-the tricks we use to make it go fast. Still drafty.
-%-------------------------------------------------------------------
-\item[\tr{docs/runtime}.]
-The {\em beginnings} of a description of details of the runtime system
-which aren't covered by the STG paper above.
-%-------------------------------------------------------------------
-\item[\tr{docs/typecheck}.]
-The {\em beginnings} of a description of tricky corners of the type checker.
-%-------------------------------------------------------------------
-\item[\tr{docs/arrays}.]
-A work-in-progress on how to handle arrays properly.
-%-------------------------------------------------------------------
-\item[\tr{docs/core-overview}:]
-The beginnings of a description of the Core data type, plus the other
-data types you need to know about to write programs which manipulate
-the Core data type.
-
-The ``how to add to the compiler'' document
-(\tr{docs/add_to_compiler}) has some of this stuff in it, too.
-%-------------------------------------------------------------------
-\item[Type classes paper:]
-This is a short (20-page) form of the massive ``Static Semantics of
-Haskell'' paper. We submitted it to Lisp and FP 1992, but they
-(unaccountably) rejected it.
-
-This paper isn't in the distribution; please ask for it.
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-06-new-in-compiler]{To do with the compiler proper}
-%* *
-%************************************************************************
-
-\begin{description}
-%-------------------------------------------------------------------
-%-------------------------------------------------------------------
-\item[1.2 syntax:]
-The parser handles the Haskell syntax given in the Haskell report,
-version~1.2. See \tr{parsers/hsp}.
-
-%-------------------------------------------------------------------
-\item[Graph reduction:]
-Updates are in and we're doing graph reduction! (All the bells and
-whistles for doing a good job of [avoiding] updates are not
-in yet.)
-
-See \tr{compiler/codeGen/{CodeGen,CgClosure}.lhs} and
-\tr{runtime/main/Update.lhc} for the main bits.
-
-%-------------------------------------------------------------------
-\item[Closure layout less deeply wired into compiler:]
-Rather than knowing word-for-word how each closure is laid out in
-memory, the compiler emits C macro calls to access/fill-in the ``fixed
-header'' and ``variable header'' parts of each closure (all described
-in the storage-manager document).
-
-This means, for example, that the very same C code used on sequential
-machines can be used on GRIP as well, even though closures in GRIP
-have some extra fields---all that is required is suitable \tr{.h}
-files to define the header macros accordingly!
-
-Anyone whose efforts involve munging around in or pinning things onto
-closures should like this ``feature.''
-
-%-------------------------------------------------------------------
-\item[Statistics about program execution:]
-The compiler emits macro calls into the C output which, if expanded
-(use @-DDO_RUNTIME_PROFILING@, default: on), will accumulate
-statistics about program behaviour. To have those statistics printed
-out (to @stderr@), give your program the @-p@ RTS flag, thusly:
-
-\begin{verbatim}
-% a.out +RTS -p
-\end{verbatim}
-
-We welcome any interesting profiles that you may churn up!
-
-See \tr{imports/StgProfile.h} and \tr{runtime/main/StgProfile.lc},
-plus insertions of those macro calls in
-\tr{compiler/codeGen}.
-
-%-------------------------------------------------------------------
-\item[New simplifier/transformation stuff:]
-Contributed by research students Andr\'e Santos and Andy Gill. In
-\tr{compiler/simplify} and \tr{compiler/stranal-triv}; it's still
-thoroughly experimental.
-
-The old-but-reliable brain-damaged simplifier is now in
-\tr{compiler/simplify0} and is still the default.
-
-%-------------------------------------------------------------------
-%\item[Faster compiler:]
-% (NOT QUITE YET) The compiler is again faster than the previous release
-% (version~0.05). The C output is also smaller.
-
-%-------------------------------------------------------------------
-\item[Compiler is compilable with Chalmers's HBC or Glasgow prototype compiler:]
-The default set of \tr{compiler/*/*.hi} interface files in the
-distribution is for compiling with HBC (this seems to be the people's
-preference); version 0.997.3 or greater, please.
-
-A separate set of \tr{.hi} files are provided for those who use the
-Glasgow prototype compiler. These are in the file
-\tr{ghc-0.06-proto-hi-files.tar.Z}, available wherever you got the
-basic distribution. The installation document says how to deal with
-these various blobs of files.
-
-The possibility of using HBC means you can compile the Glasgow Haskell
-compiler on any machine supported by HBC (Sun3s, Sun4s, DEC 3100s
-[and one or two more flavours?]).
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-06-new-in-compiler-contrib]{In contributed bits of the compiler}
-%* *
-%************************************************************************
-
-\begin{description}
-%-------------------------------------------------------------------
-\item[Semantique strictness analyser:]
-The one in the distribution now builds; in \tr{compiler/stranal-sem}.
-This would only be of interest to those who might want to play with
-it. The rest of the compiler can't use its results.
-
-If you want to build this strictness analyser, you need to configure
-appropriately (see \tr{mkworld/Project-ghc-full.*}, when you get to
-that part of the installation instructions).
-\end{description}
-
-Please send us {\em your} bits for next time!
-
-%************************************************************************
-%* *
-\subsection[0-06-new-in-libraries]{In @Prelude@ and runtime support}
-%* *
-%************************************************************************
-
-\begin{description}
-%-------------------------------------------------------------------
-\item[``Binary bloat'' from the prelude, ameliorated:]
-The C code produced from compiling the prelude is chopped into
-some 400 separate C files, which are individually compiled and
-banged into an archive. The result is that you only get the bits you
-``need,'' and binary sizes have about halved.
-(There is more work to do in this department.)
-
-%-------------------------------------------------------------------
-\item[Naive implementation of Haskell I/O:]
-At least \tr{ReadChan stdin} and \tr{AppendChan stdout} requests work.
-It shouldn't be too hard to add support for other requests in
-\tr{lib/io/DialogueToIO.lhs}. (Only [extended] Haskell hacking needed!)
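-
-For instance, a minimal ``echo'' program in the standard Haskell~1.2
-stream style should go through (a sketch, not taken from the
-distribution):
-
-\begin{verbatim}
-main :: Dialogue
-main ~(Str userInput : _) =
-    [ ReadChan stdin, AppendChan stdout userInput ]
-\end{verbatim}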
-
-%-------------------------------------------------------------------
-\item[Storage management moved behind a safe wall:]
-
-It's now in \tr{runtime/storage/}. All four flavours of garbage
-collector (two-space copying, one-space compacting, dual-mode, and
-Appel-like generational) seem to work.
-
-Patrick Sansom, research student and hacker extraordinaire, did all
-of this.
-
-%-------------------------------------------------------------------
-\item[Flags-to-RTS machinery in place:]
-
-When your @ghc@-compiled Haskell program starts up, any command-line
-arguments bracketed by @+RTS@ and @-RTS@ (or the end of the arguments)
-are assumed to be flags for the runtime system. These flags are used
-to alter the heap size, ask for garbage-collection stats, etc.
-
-To see what flags are available, try: \tr{myprog +RTS -f}.
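-
-For instance, to run with a bigger heap and ask for garbage-collection
-statistics (the particular flag letters here are only a guess;
-\tr{-f} lists the real ones):
-
-\begin{verbatim}
-% myprog its-usual-args +RTS -H4m -S
-\end{verbatim}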
-
-Patrick did this, too.
-
-%-------------------------------------------------------------------
-\item[C-optimisation sleaziness also better hidden:]
-
-It is in \tr{runtime/c-as-asm/}. (It needs to be tidier, but...)
-
-We are still actively working on getting this right. Please get in
-touch if you are interested.
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-06-new-in-mkworld]{In distribution/build/installation machinery}
-%* *
-%************************************************************************
-
-\begin{description}
-%-------------------------------------------------------------------
-\item[Better line numbers, and Emacs-able TAGS files:]
-(Yes, they're related!) Error messages should come out with better
-line numbers than before.
-
-The distribution now includes tags-making things: a souped-up \tr{etags}
-program [for C], \tr{perltags} [for perl], and \tr{hstags} [for
-Haskell] (mostly in \tr{utils/scripts}). The Haskell tags program
-uses the parser, so it Does It Right.
-
-\tr{make tags fulltags} at the top of the distribution tree will make a
-huge TAGS file for the whole compilation system.
-
-%-------------------------------------------------------------------
-\item[\tr{make install} might do something sensible:]
-Yes, it does. But you'd be well advised to do a \tr{make -n install}
-just to check first!
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-06-new-misc]{Miscellaneous new things}
-%* *
-%************************************************************************
-
-\begin{description}
-%-------------------------------------------------------------------
-\item[Consistency-checking for executables:]
-Given that our system can generate \tr{.o} object files which are
-seriously {\em incompatible} with each other, depending on choice of
-garbage collector, degree of optimisation, whether or not compiling
-for GRIP, etc., we have implemented a scheme (in the driver \tr{ghc})
-that checks that all the linked bits in an executable ``belong''
-together.
-
-%-------------------------------------------------------------------
-\item[Scripts from Denis Howe:]
-We have included his \tr{fptags} and \tr{mira2hs} scripts that he
-posted to \tr{comp.lang.functional}.
-\end{description}
diff --git a/ghc/docs/release_notes/0-07-README b/ghc/docs/release_notes/0-07-README
deleted file mode 100644
index 4048f17747..0000000000
--- a/ghc/docs/release_notes/0-07-README
+++ /dev/null
@@ -1,4 +0,0 @@
-Version 0.07 was an unannounced not-really-a-release to a few diehard
-friends. Much of what is described in the 0.08 release notes was
-actually done in 0.07. Please see the 0.08 release notes for further
-details.
diff --git a/ghc/docs/release_notes/0-07-notes.lit b/ghc/docs/release_notes/0-07-notes.lit
deleted file mode 100644
index 7b729d6046..0000000000
--- a/ghc/docs/release_notes/0-07-notes.lit
+++ /dev/null
@@ -1,51 +0,0 @@
-%************************************************************************
-%* *
-\section[0-07-new]{New things in Glasgow \Haskell{}, release~0.07}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\subsection[0-07-new-docs]{New documentation, especially for hackers!}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\subsection[0-07-new-in-compiler]{To do with the compiler proper}
-%* *
-%************************************************************************
-
-\begin{description}
-%-------------------------------------------------------------------
-\item[@eval@ does the right thing:]
-
-%-------------------------------------------------------------------
-\item[CAFs fully supported (notably updatable ones):]
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-07-new-in-compiler-contrib]{In contributed bits of the compiler}
-%* *
-%************************************************************************
-
-Please send us {\em your} bits for next time!
-
-%************************************************************************
-%* *
-\subsection[0-07-new-in-libraries]{In @Prelude@ and runtime support}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\subsection[0-07-new-in-mkworld]{In distribution/build/installation machinery}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\subsection[0-07-new-misc]{Miscellaneous new things}
-%* *
-%************************************************************************
diff --git a/ghc/docs/release_notes/0-08-notes.lit b/ghc/docs/release_notes/0-08-notes.lit
deleted file mode 100644
index eaefa740fa..0000000000
--- a/ghc/docs/release_notes/0-08-notes.lit
+++ /dev/null
@@ -1,149 +0,0 @@
-0.08 was not an announced release, so these notes are of historical
-interest, at best.
-
-%************************************************************************
-%* *
-\subsection[0-08-new-docs]{New documentation, especially for hackers!}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\subsection[0-08-new-in-usage]{Main user-visible changes}
-%* *
-%************************************************************************
-
-\begin{description}
-%-------------------------------------------------------------------
-\item[@ghc@ driver flag fiddling:]
-These things change... A good thing to try is \tr{ghc -help}, unless
-of course you think the documentation might be right (in which case
-look there :-).
-
-%-------------------------------------------------------------------
-\item[@ghc@ driver more ``policy-free'':]
-
-The driver no longer has great wads of built-in options for its
-constituent phases (parser, Haskell compiler, C compiler, etc.). It
-is much easier to configure these (if required) at build time. A
-better idea, which we use, is to wired in very few options, but to use
-the ``make world'' machinery to ensure that the desired (wads of
-options) are always passed explicitly.
-
-%-------------------------------------------------------------------
-\item[-OC:] We got rid of the \tr{-OC} flag.
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-08-new-in-compiler]{To do with the compiler proper}
-%* *
-%************************************************************************
-
-\begin{description}
-%-------------------------------------------------------------------
-\item[``Renamer'' is in:]
-Essentially, this implements module-system stuff. Checks for
-naming/scoping errors have been moved out of the typechecker into the
-renamer, which should be faster.
-
-%-------------------------------------------------------------------
-\item[Interface-file (\tr{.hi} file) generation:]
-It works.
-
-%-------------------------------------------------------------------
-\item[Ambiguous-type resolution:]
-It's basically right now, except that we still don't grok @default@
-declarations (which we have yet to see in a real Haskell program :-).
-
-%-------------------------------------------------------------------
-\item[Smaller C output:]
-
-%-------------------------------------------------------------------
-\item[Code generator:]
-Improvements related to the information carried around about closures
-(@ClosureInfo@ and @LambdaFormInfo@ types); matches the STG-machine paper.
-
-CAFs fully supported (notably updatable ones).
-
-Black-holing (at garbage-collection time) fully supported.
-
-%-------------------------------------------------------------------
-\item[Simplifiers:]
-Further work on the @Core@-to-@Core@ local-transformation passes (in
-\tr{compiler/simplCore/}). Also, we have added
-some @STG@-to-@STG@ transformations; for example, floating @lets@
-outward is most conveniently done at the STG level...
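-
-To give the flavour (an illustrative sketch only): a @let@ whose
-right-hand side does not mention the enclosing lambda's argument can
-be floated outside it, so the closure is allocated once rather than on
-every call:
-
-\begin{verbatim}
-\ x -> let v = f y in ...v...
-    ===>
-let v = f y in \ x -> ...v...
-\end{verbatim}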
-
-%-------------------------------------------------------------------
-\item[Cost-centre-style profiling:]
-
-%-------------------------------------------------------------------
-\item[Improvements to basic datatypes:]
-Notably to @Ids@ (\tr{basicTypes/Id.lhs}) and names
-(\tr{basicTypes/NameTypes.lhs}). There is a new compiler-wide class,
-@NamedThing@ (\tr{utils/Outputable.lhs}). Essentially it is easier to
-ask of an entity: where did you come from? (e.g., PreludeCore?
-imported?) What are you? (a data constructor? a dictionary selector?)
-What is your original name? Where are you going? (exported or not...)
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-08-new-in-compiler-contrib]{In contributed bits of the compiler}
-%* *
-%************************************************************************
-
-\begin{description}
-\item[Evaluation-transformer bits:]
-Denis Howe has sent us an initial version (\tr{compiler/evalTran}).
-It isn't used by default, but is presumably play-withable...
-
-This @Core@-to-@Core@ transformation makes all lets of the form
-\begin{verbatim}
-let VAR = eval EXPR in ...
-\end{verbatim}
-strict. @eval@ is a dummy name which is thrown away (i.e., @eval x = x@).
-\end{description}
-
-Please send us {\em your} bits for next time!
-
-%************************************************************************
-%* *
-\subsection[0-08-new-in-libraries]{In @Prelude@ and runtime support}
-%* *
-%************************************************************************
-
-Prelude is 1.2.
-
-The compiler has basically all of @PreludeCore@ wired into it (see
-\tr{compiler/prelude/}); the rest is brought in with a straightforward
-\tr{import Prelude} (which brings in \tr{imports/Prelude.hi}). [The
-old \tr{MiniPrel*} files are dead and unmissed.]
-
-%************************************************************************
-%* *
-\subsection[0-08-new-in-mkworld]{In distribution/build/installation machinery}
-%* *
-%************************************************************************
-
-The ``make world'' machinery has many small improvements.
-
-It works notably better in a shared-symlink-tree world (which we use
-at Glasgow).
-
-We have abandoned efforts to use one build tree for making object
-files for multiple platforms. We can make simpler Makefiles as a
-result.
-
-There's a new standard setup, called \tr{fast}. The name is
-inappropriate at present, but it is intended to be for people who
-value compiled-code-speed above all else (within reason, of course).
-
-%************************************************************************
-%* *
-\subsection[0-08-new-misc]{Miscellaneous new things}
-%* *
-%************************************************************************
-
-New version of Denis Howe's \tr{mira2hs} script.
diff --git a/ghc/docs/release_notes/0-10-notes.lit b/ghc/docs/release_notes/0-10-notes.lit
deleted file mode 100644
index 9048e8a1bc..0000000000
--- a/ghc/docs/release_notes/0-10-notes.lit
+++ /dev/null
@@ -1,72 +0,0 @@
-Release~0.10 was the first major, public release of this compilation
-system.
-
-The announcement (now distributed in \tr{ghc/docs/ANNOUNCE-0.10})
-describes the most notable features of this release. These notes,
-therefore, just cover some of the fine points...
-
-%************************************************************************
-%* *
-\subsection[0-10-new-docs]{New documentation}
-%* *
-%************************************************************************
-
-We're providing a few more papers, in \tr{ghc/docs/papers}. See
-\tr{ghc/docs/README} for a full list of documentation.
-
-%************************************************************************
-%* *
-\subsection[0-10-new-in-usage]{User-visible changes}
-%* *
-%************************************************************************
-
-An ``Appel-style'' generational garbage collector is now the default.
-(It used to be a two-space copying collector.)
-
-The flag to use the unboxery and other Glasgow extensions was
-\tr{-funboxed}. We've changed it to \tr{-fglasgow-exts}. We may
-elaborate this further, eventually...
-
-(If 0.06 is the last version you looked at, flags/options have changed
-a fair bit since then.)
-
-%************************************************************************
-%* *
-\subsection[0-10-new-in-compiler]{New in the compiler proper}
-%* *
-%************************************************************************
-
-Derived instances are in, well, partly. We'll put in the rest when
-there's a demand (or we have nothing better to do).
-
-@Integers@ (arbitrary-precision integers) are now in properly.
-Just as HBC does, we use the GNU multi-precision arithmetic package.
-Source is in \tr{ghc/runtime/gmp}.
-
-We did a bunch of stuff in the typechecker area to support
-overloading better; we called it ``dictionary stomping.'' One
-side-effect of this work is that error messages related to overloading
-have a slight chance of being sensible (which they weren't before).
-
-``Primitive arrays,'' on top of which efficient, regular Haskell
-arrays can be (are) built, went in. There's a {\em little} about
-using them in the ``Glasgow extensions'' section of the User's Guide.
-
-Similarly, the mechanisms for calling C directly (@ccall@ and @casm@)
-are more likely to be useful. Again, a little documentation in the
-same place...
-
-%************************************************************************
-%* *
-\subsection[0-10-new-in-libraries]{In the prelude and runtime support}
-%* *
-%************************************************************************
-
-Our standard prelude conforms to the Haskell~1.2 report.
-
-We support a non-standard @fromInt@ method for the @Num@ class, just as
-HBC does.
-
-We support a non-standard @cmp3@ method for the @Ord@ class. Snoop
-around in the \tr{ghc/lib/prelude/*.hs} code, if you care. (We use it
-in code generated for derived instances.)
diff --git a/ghc/docs/release_notes/0-16-notes.lit b/ghc/docs/release_notes/0-16-notes.lit
deleted file mode 100644
index ba2d504062..0000000000
--- a/ghc/docs/release_notes/0-16-notes.lit
+++ /dev/null
@@ -1,106 +0,0 @@
-Release~0.16 was the second public release of this compilation system.
-It was primarily a bug-fixing and ``solidifying'' release.
-
-The announcement for this release is distributed as \tr{ANNOUNCE-0.16}
-in the top-level directory.
-
-%************************************************************************
-%* *
-\subsection[0-16-new-docs]{New documentation}
-%* *
-%************************************************************************
-
-We're providing a few more papers, in \tr{ghc/docs/papers}. See
-\tr{ghc/docs/README} for a full list of documentation.
-
-%************************************************************************
-%* *
-\subsection[0-16-new-in-compiler]{New in the compiler proper}
-%* *
-%************************************************************************
-
-New strictness analyser and update analyser; their use will be
-reflected in the pragmas in your interface files. The format of these
-interface pragmas {\em will probably change}.
-
-Errors related to misuse of Prelude identifiers are more likely to be
-caught.
-
-For some unknown reason, our ``wired-in'' default declaration in 0.10 was
-\tr{default (Integer,Double)}. We changed it to
-\tr{default (Int,Double)}, as the Report suggests (which is less safe).
-
-We've switched from basing our derived instances on a non-standard
-@cmp3@ method (class @Ord@), to basing them on another non-standard
-method @tagCmp@. The relevant types and things are...
-\begin{verbatim}
-cmp3 :: b -> b -> b -> a -> a -> b
-
-tagCmp :: a -> a -> CMP_TAG
-data CMP_TAG = LT_TAG | EQ_TAG | GT_TAG
-\end{verbatim}
-If you peer into the \tr{ghc/lib/prelude/*.hs} code, it will be
-obvious what's going on here. We hope to make further improvements
-to the comparison code in the foreseeable future.
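-
-To give the flavour (a hand-waving sketch, not lifted from
-\tr{ghc/lib/prelude}): the ordinary @Ord@ methods can all be defined
-by one case analysis on @tagCmp@, e.g.:
-
-\begin{verbatim}
-x <  y = case tagCmp x y of { LT_TAG -> True ; _ -> False }
-x >= y = case tagCmp x y of { LT_TAG -> False; _ -> True  }
-\end{verbatim}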
-
-%************************************************************************
-%* *
-\subsection[0-16-new-in-libraries]{In the prelude and runtime support}
-%* *
-%************************************************************************
-
-The libraries underpinning Glasgow monadic I/O, sequencing, primitive
-arrays, and variables have been reworked, with some consequent
-changes. If you encounter difficulties, you should consult the
-@PreludeGlaIO.hi@ and @PreludeGlaArray.hi@ interfaces in your
-\tr{imports} directory.
-
-Andy Gill's proposal for access to standard Haskell I/O functions from
-the monadic I/O world has been implemented. So you have functions
-such as @getArgsIO@, @appendChanIO@, etc., etc.
-
-The stuff you used to get from @Stdio.hi@ now comes directly from
-@PreludeGlaIO.hi@.
-
-The @packString#@ function has been moved into a module of its own,
-@PackedString@, and {\em its type has changed}. The functions now in
-that module are (to be elaborated...):
-\begin{verbatim}
-packString :: String -> PackedString
-packString# :: String -> Arr# Char#
-\end{verbatim}
-The latter is very useful for preparing @String@ arguments to pass to C.
-
-The HBC library modules that compile readily with GHC are available;
-you'll need to give a \tr{-lHShbc} option to the driver. These
-modules are:
-\begin{verbatim}
-Either, Hash, ListUtil, Maybe, Miranda, Number, Parse, Pretty, QSort,
-Random, Time, Word
-\end{verbatim}
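-
-A link line might then look something like this (illustrative only;
-the module names are made up):
-
-\begin{verbatim}
-% ghc -o myprog Main.o Useful.o -lHShbc
-\end{verbatim}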
-
-The GNU multi-precision (GMP) package which underpins our @Integer@
-support has been upgraded to version 1.3.2.
-
-%************************************************************************
-%* *
-\subsection[0-16-new-elsewhere]{New elsewhere}
-%* *
-%************************************************************************
-
-0.16 has a new and much uglier ``assembler mangler''
-(\tr{ghc/driver/ghc-asm-*.lprl}), which is what converts GCC-produced
-assembly-language output into the stuff you actually run. Besides
-throwing away function prologues/epilogues, it parks ``info tables''
-next to entry code, and fast-entry code right next to slow-entry code.
-
-The net effect of this assembler-mangler is that there is {\em very
-little runtime penalty} for compiling via GCC.
-
-The way we go about mapping ``STG registers'' to real machine
-registers (\tr{ghc/imports/StgRegs.lh}) is different. It should be
-a particular improvement for machines with few registers (though we still
-don't have a good solution for x86 machines).
-
-We can now ``steal'' caller-save registers; in the past, we could only
-steal callee-save registers.
diff --git a/ghc/docs/release_notes/0-17-notes.lit b/ghc/docs/release_notes/0-17-notes.lit
deleted file mode 100644
index 5528f2a7b5..0000000000
--- a/ghc/docs/release_notes/0-17-notes.lit
+++ /dev/null
@@ -1 +0,0 @@
-Added @getProgNameIO@ and @getProgNameIOE@ (inadvertently omitted).
diff --git a/ghc/docs/release_notes/0-18-README b/ghc/docs/release_notes/0-18-README
deleted file mode 100644
index dc6ec5f953..0000000000
--- a/ghc/docs/release_notes/0-18-README
+++ /dev/null
@@ -1,63 +0,0 @@
-This is version 0.18 of the Glasgow Haskell compiler.
-
-0.18 is an "internal" release intended *ONLY* for those actually
-hacking on the compiler source -- that is, those who *REALLY* know
-what they are doing. Anyone else is crazy to use it; anyone who uses
-it without keeping a "real" GHC release (0.16 or 0.17) around is
-obviously demented.
-
-The chances of a "clean" build are near zero, no matter what Haskell
-compiler you build with. Be prepared to utter magic incantations.
-(For example, `make reader/ReadPragmas.o
-EXTRA_HC_OPTS="-fno-strictness -fno-specialise -fno-case-of-case"'.)
-
-An incomplete "what's new" list:
-
-* Unfoldings across module boundaries. Still v limited.
-
-* Specialisation of overloaded functions. Instances -- not yet.
-
-* Strictness analyser that handles "nested" strictness and does
- "absence analysis" as well. Makes Prelude.hi fun to read. Hints:
- _N_ = nothing, _A_ = arity, _U_ = update analysis info, _S_ =
- strictness (U = unpack, A = absent, L = lazy, S = strict, E = strict
- & enumeration type, P = primitive).
-
-* Prelude info no longer horribly built into the compiler (as much).
- Manipulating the prelude is not nearly so delicate as it once was.
-
-* Some names have changed: MkChar => C#, MkInt => I#, MkFloat => F#,
- MkDouble => D#, MkInteger => J#. (They won't change again...)
-
-* Includes Patrick Sansom's array-based substitution code (much faster
- typechecking). (You probably won't see the speedup, though, because
- we've spent all the savings on fancier intermodule stuff.)
-
-* We've added a Core "lint" pass, which can be used to check
- types/out-of-scope-errors/etc after every Core-to-Core pass. It's
- great! -dcore-lint.
-
-* Initial "Native" class support; use "-syslib hbc".
-
-* Lots of compiler code hacked on, for better or worse.
-
-* Lots of gratuitous "trace" messages when running the compiler :-)
-
-Documentation is unchanged since 0.1[67]. There is not one word about
-any new features.
-
-We *hope* for a new public release before Christmas (1993).
-
-Will Partain
-Keeper of the Bits, AQUA Project
-
-Dated: 93/11/04
-
-E-mail contacts:
- glasgow-haskell-bugs@dcs.glasgow.ac.uk (bug reports)
- glasgow-haskell-request@dcs.glasgow.ac.uk (general queries)
-
-Anonymous FTP site: ftp.dcs.glasgow.ac.uk:pub/haskell/glasgow. Mostly
-mirrored by ftp.cs.chalmers.se and nebula.cs.yale.edu (same
-directory). Also: src.doc.ic.ac.uk, in
-computing/programming/languages/haskell/glasgow/.
diff --git a/ghc/docs/release_notes/0-19-notes.lit b/ghc/docs/release_notes/0-19-notes.lit
deleted file mode 100644
index 66c102453c..0000000000
--- a/ghc/docs/release_notes/0-19-notes.lit
+++ /dev/null
@@ -1,187 +0,0 @@
-Release~0.19 was the third public release of this compilation system.
-It incorporates our new work for the last half of 1993.
-
-The announcement for this release is distributed as \tr{ANNOUNCE-0.19}
-in the top-level directory.
-
-%************************************************************************
-%* *
-\subsection[0-19-user-visible]{User-visible changes in 0.19, including incompatibilities}
-%* *
-%************************************************************************
-
-You'll need to recompile everything if you're switching from a
-previous version of GHC. (If you don't, you'll get ``consistency
-errors''.)
-
-Default declarations: in.
-
-Derived instances of \tr{Ix} and \tr{readsPrec} (\tr{Text} class): in.
-(Random Prelude instances of weird things: in.) You can avoid the
-\tr{readsPrec} methods by using the \tr{-fomit-derived-read} option.
-
-Should be {\em faster}, for two reasons: (1)~A native-code generator
-for the SPARC architecture (avoids C compilation time); (2)~an
-array-based [vs naive list-based...] substitution mechanism in the
-typechecker. Using \tr{-O2} or \tr{-fvia-C} avoids the native-code
-generator.
-
-(Shouldn't be too much faster, because we spent a lot of the winnings
-:-()
-
-\tr{MkInt} and friends {\em renamed}: \tr{MkInt}, \tr{MkChar},
-\tr{MkFloat}, \tr{MkDouble}, and \tr{MkInteger} are now \tr{I#},
-\tr{C#}, \tr{F#}, \tr{D#}, and \tr{J#}, respectively.
-We won't change them again, we promise.
-
-\tr{-i}/\tr{-I} flags changed: You used to specify directories to
-search for interface files with \tr{-I <dir>}; now you do it with
-\tr{-i<dir>} [{\em no space after the \tr{-i}}] (same as HBC).
-\tr{-I} is reserved for when doing \tr{-cpp} and for the C compiler,
-when it is run.
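-
-For example (the directory name is made up):
-
-\begin{verbatim}
-% ghc -c -imy-imports Foo.hs
-\end{verbatim}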
-
-Renaming, feature horribilis that it is, is more-or-less fully
-implemented. The User's Guide lists one or two exceptions.
-
-Specialised versions of overloaded functions: these are created
-automagically with \tr{-O}, and also when you ask for them with
-\tr{SPECIALIZE} pragmas. See the User's Guide for how to do this
-(same as HBC). (We don't have specialised instance declarations yet.)
-
-GHC tries hard to do inlining (or ``unfolding'') across module
-boundaries; just look at \tr{-O}-produced interface files. You can
-enliven this process with \tr{INLINE} pragmas.
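-
-By way of illustration (a sketch with made-up functions; see the
-User's Guide for the definitive syntax):
-
-\begin{verbatim}
-hammer :: Num a => a -> a
-hammer x = x * x + x
-{-# SPECIALIZE hammer :: Int -> Int #-}
-
-{-# INLINE smallAndBusy #-}
-smallAndBusy :: Int -> Int
-smallAndBusy x = x + 1
-\end{verbatim}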
-
-The \tr{__GLASGOW_HASKELL__} CPP symbol is only defined when
-pre-processing Haskell source (and not when pre-processing generated
-C).
-
-Revised scheme for using system-provided libraries (e.g., the HBC
-library). Just use a \tr{-syslib <name>} option when compiling and
-linking. See the User's Guide for details.
-
-%************************************************************************
-%* *
-\subsection[0-19-new-docs]{New documentation}
-%* *
-%************************************************************************
-
-See \tr{ghc/docs/README} for a full list of documentation.
-
-The updated User's Guide has new sections, including: (a)~advice for
-creating smaller and faster programs more quickly, and (b)~one about
-the HBC library [stolen documentation].
-
-We've dropped papers from the distribution (they're big, and you can
-get them easily anyway); instead, we provide abstracts of all
-documents about all relevant work at Glasgow; see
-\tr{ghc/docs/abstracts}.
-
-New: ``A Simple Country Boy's Guide to Monadic-Style Programming'' (Will
-Partain). In \tr{ghc/docs/simple-monad.lhs}.
-
-%************************************************************************
-%* *
-\subsection[0-19-new-in-compiler]{New in the compiler proper}
-%* *
-%************************************************************************
-
-Strictness analyser: produces/handles ``nested'' strictness -- e.g.,
-\tr{U(SLL)} means ``this single-constructor argument is strict, and it
-is also strict in its first component.'' There's also ``absence
-analysis'' in there: \tr{U(ASA)} means ``strict in the second
-component, and the first/third are not used at all.''
-
-New simplifier: the program-transformation engine in the middle of the
-compiler. The ``old simplifier,'' primarily the work of Andr\'e
-Santos, has retired to an Old Simplifier's Home on the coast of
-Brazil, where it is collecting a well-deserved monadic pension.
-
-%************************************************************************
-%* *
-\subsection[0-19-new-in-libraries]{In the prelude and runtime support}
-%* *
-%************************************************************************
-
-A couple of new functions in the @PackedString@ module that comes with
-the system. Mentioned in the User's Guide.
-
-The HBC library has been upgraded to match the latest HBC release
-(0.999.5). We now support the \tr{Native} and \tr{NameSupply}
-modules, which we didn't before.
-
-Alastair Reid's implementation of ``stable pointers,'' which he uses
-to do callbacks with the X Window System (yow!), is in. I (WDP) don't
-know about documentation.... send mail if you need to know.
-
-%************************************************************************
-%* *
-\subsection[0-19-new-ports]{In the porting department}
-%* *
-%************************************************************************
-
-We use Sun4s running SunOS~4.1.3, so those are the best-supported
-machines. For these, we have a native-code generator (the best); next
-best is a ``registerised'' port; the bare minimum is an
-``unregisterised'' port.
-
-The 0.19 infrastructure for ``stealing'' registers for a registerised port
-(using a GCC extension) is much more robust---take note, brave porters.
-
-Here's everything that's known about the porting world:
-\begin{description}
-%-------------------------------------------------------------------
-\item[Sun3 running SunOS~4.1.3:]
-A registerised port is done; could be made available.
-
-%-------------------------------------------------------------------
-\item[GRIP multiprocessor:]
-68020-based multiprocessor for running parallel Haskell programs.
-A registerised port is done; too bad we have the only machine!
-If you have something parallel you {\em really} wanted to run on it,
-please get in touch with us.
-
-%-------------------------------------------------------------------
-\item[HP-PA box running HP/UX:]
-An unregisterised port of 0.17 (0.16+portability fixes) seems to
-work, except that floating-point is definitely busted. 0.19~should be
-no worse.
-
-%-------------------------------------------------------------------
-\item[DECstation (MIPS-based):]
-An unregisterised port of 0.17 works; 0.19~should be the same.
-
-%-------------------------------------------------------------------
-\item[DEC Alpha running OSF/1:]
-We've done an unregisterised port (unreleased), and a registerised
-port is not far off.
-
-%-------------------------------------------------------------------
-\item[Sun running Solaris 2.x:]
-We've started on this ourselves and foresee no obstacle to a
-``registerised'' port. Not sure about native-code...
-
-%-------------------------------------------------------------------
-\item[x86 PCs running Linux:]
-This really needs a native-code generator to be viable. We hope the
-elves will give us one for Christmas!
-
-%-------------------------------------------------------------------
-\item[Macintosh, using MPW:]
-As mind-blowing as it may seem, David Wright in Tasmania has actually
-gotten GHC to run on a Macintosh. I believe it's still in the ``you
-can do it, but you don't want to'' stage.
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-19-new-elsewhere]{New elsewhere}
-%* *
-%************************************************************************
-
-In the ``literate programming'' stuff that happens to come with GHC: a
-few bug fixes, plus a significant contribution from Chris Toshok
-(\tr{toshok@cs.uidaho.edu}) of ``lit2html'' stuff; i.e., to convert
-your literate programs into HTML, the Hypertext Markup Language used
-on the World-Wide Web. I (WDP) am not sure it's completely finished,
-or exactly what you invoke to do what, but it seems Way Cool.
diff --git a/ghc/docs/release_notes/0-22-notes.lit b/ghc/docs/release_notes/0-22-notes.lit
deleted file mode 100644
index aa9e72205b..0000000000
--- a/ghc/docs/release_notes/0-22-notes.lit
+++ /dev/null
@@ -1,205 +0,0 @@
-Release~0.22 is the fourth public release of Glasgow Haskell.
-It incorporates our new work for the first half of 1994.
-
-The announcement for this release is distributed as \tr{ANNOUNCE-0.22}
-in the top-level directory.
-
-%************************************************************************
-%* *
-\subsection[0-22-ports]{What machines GHC~0.22 runs on}
-%* *
-%************************************************************************
-
-We use Sun4s running SunOS~4.1.3 and DEC~Alphas running OSF/1~V2.0, so
-those are the ``fully-supported'' platforms, unsurprisingly. For
-Sun4s, we have a native-code generator, which makes for somewhat
-quicker compilations. (We actually produce better code by compiling
-intermediate C with GCC.)
-
-The GHC hierarchy of Porting Goodness: (a)~Best is a native-code
-generator [only for Sun4s, now]; (b)~next best is a ``registerised''
-port; (c)~the bare minimum is an ``unregisterised'' port.
-``Unregisterised'' Haskell programs are much bigger and slower,
-but the port is much easier to get going.
-
-Here's everything that's known about GHC ports, as of 0.22:
-\begin{description}
-%-------------------------------------------------------------------
-\item[Sun4 running SunOS~4.1.3:]
-Fully supported, including native-code generator.
-
-%-------------------------------------------------------------------
-\item[DEC Alpha running OSF/1 V2.0:]
-Fully supported, but no native-code generator (none planned).
-
-%-------------------------------------------------------------------
-\item[Sun3 running SunOS~4.1.3:]
-GHC~0.22 should work, registerised. (0.21 did work.)
-
-%-------------------------------------------------------------------
-\item[Sun4 running Solaris 2.x:]
-We expect to finish a ``registerised'' port ourselves, in the
-foreseeable future. Feel free to ask about it, if interested. Not
-sure about native-code...
-
-%-------------------------------------------------------------------
-\item[HP-PA box running HP/UX 9.x:]
-An unregisterised port of 0.21 (last ``internal'' release before 0.22)
-seems to work, except that floating-point is definitely busted.
-0.22~should be the same.
-
-%-------------------------------------------------------------------
-\item[Silicon Graphics box running IRIX 5.x:]
-An unregisterised port of 0.21
-seemed to work. 0.22~should be the same.
-
-%-------------------------------------------------------------------
-\item[DECstation (MIPS-based):]
-An unregisterised port back around the time of 0.17 seemed to work;
-0.22~should be the same, modulo a little bit-rot.
-
-%-------------------------------------------------------------------
-\item[x86 PCs running Linux/NetBSD/FreeBSD:]
-This really needs a native-code generator to be viable. No
-recent progress.
-
-%-------------------------------------------------------------------
-\item[GRIP multiprocessor:]
-GRIP is a 68020-based multiprocessor for running parallel Haskell
-programs; too bad we have the only machine! We run GHC~0.16 on it,
-with no plans to upgrade.
-
-We are working on other parallel stuff. Stay tuned.
-
-%-------------------------------------------------------------------
-\item[NeXT box running whatever NeXTs run:]
-Carsten Schultz succeeded with a ``registerised'' port of GHC~0.19.
-There's probably a little bit-rot since then, but otherwise it should
-still be fine.
-
-%-------------------------------------------------------------------
-\item[Macintosh, using MPW:]
-As mind-blowing as it may seem, David Wright in Tasmania has actually
-gotten GHC to run on a Macintosh. Ditto James Thomson here at Glasgow.
-You may be able to get Thomson's from here. (Not sure that it will
-excite you to death, but...)
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-22-user-visible]{User-visible changes in 0.22, including incompatibilities}
-%* *
-%************************************************************************
-
-You'll need to recompile everything if you're switching from a
-previous version of GHC. (If you don't, you'll get ``consistency
-errors''.)
-
-Lazy pattern-matching (e.g., \tr{let (x,y) = ... in ...}) no longer
-carries with it the threat of a space leak. (It used to be that, as
-long as {\em either} of \tr{x} or \tr{y} was ``live,'' the storage
-manager would hang onto {\em both} chunks of graph.) No longer.
-
-We've done a complete overhaul of the state-transformer stuff which
-underlies our array, I/O, and C-calling support. The ``state
-interface document,'' distributed in \tr{ghc/docs/state-interface.dvi}
-defines what we have done. You may wish to check our abstracts
-(\tr{ghc/docs/abstracts/}) to find papers about this stuff. If you've
-written code that grovels around in GHC innards (e.g., uses
-``primitive operations'' directly), it will probably need updating.
-
-We do {\em not} support Haskell~1.3 monadic I/O (any draft version),
-but we will once the dust settles. We still support the
-\tr{PreludeGlaIO} interface that we have had for some time.
-
-You can now build GHC to support ``threaded'' execution. (Configure
-\tr{--with-threads=yes}, then use GHC with a \tr{-threads} option.)
-Using the \tr{_seq_} and \tr{_par_} constructs, you can then say which
-expressions should be evaluated sequentially and which sparked in parallel.
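-
-A parallel map in this style might look like the following sketch
-(assuming that \tr{e1 _par_ e2} sparks \tr{e1} and returns \tr{e2},
-and that \tr{e1 _seq_ e2} evaluates \tr{e1} before returning \tr{e2}):
-
-\begin{verbatim}
-parMap f []     = []
-parMap f (x:xs) = fx _par_ (rest _seq_ (fx : rest))
-  where
-    fx   = f x
-    rest = parMap f xs
-\end{verbatim}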
-
-GHC does a better job of not stealing from the user's name space (for
-its own extensions, etc.). For example, the ``set cost-centre''
-keyword is now \tr{_scc_}, rather than \tr{scc} (though the latter
-will continue to be accepted for a while). With the
-\tr{-fglasgow-exts} flag on, names may begin with an underscore
-(\tr{_}).
-
-We have friendly interaction between ``Haskell land'' and ``C land''
-via (a)~{\em stable pointers} (pass Haskell pointers to C and have the
-garbage-collector not forget about them); and (b)~{\em malloc
-pointers} (return C pointers to Haskell and tell Haskell ``throw this
-away when you're finished with it''). See the User's Guide for more
-info.
-
-%************************************************************************
-%* *
-\subsection[0-22-support]{New in support tools (e.g., profiling)}
-%* *
-%************************************************************************
-
-The profiling system of GHC has been improved in version~0.22 in the
-following ways:
-\begin{description}
-\item[Now uses the ``hybrid scheme'':] (Rather than pure ``lexical
-scoping'') What this means for you: ``CAF'' cost-centres will no
-longer be blamed for gigantic chunks of the time in your program.
-
-\item[Uses the generational garbage-collector:] (At least when doing
-time profiling) It used to use a two-space copying GC; it still does
-when space profiling. You should be able to profile larger programs.
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-22-new-in-compiler]{New in the compiler proper}
-%* *
-%************************************************************************
-
-The ``simplifier''---the program-transformation engine in the middle
-of the compiler---has settled down (at least until Simon has another
-Brain Wave). We've made ``per-simplification'' flags, so that each
-run of the simplifier can be controlled separately---this allows very
-precise control. (But makes it pretty hard to exercise any control
-from the command-line.) More in the docs.
-
-Related to simplifier stuff was a revision of the ``unfoldery''
-machinery. We try very hard to find and exploit unfolding (or
-inlining), including across module boundaries.
-
-%************************************************************************
-%* *
-\subsection[0-22-new-in-libraries]{In the prelude and runtime support}
-%* *
-%************************************************************************
-
-We've introduced a ``GHC system library,'' similar to the ``HBC system
-library'' which we have supported for some time. Just say
-\tr{-syslib ghc} and the GHC library stuff is at your fingertips.
-See the User's Guide for exactly what's on offer (not a lot right now,
-but more will show up).
-
-The @PackedString@ module that comes with the system is even beefier
-than before; see the User's Guide. (This module really should be
-in the GHC library.)
-
-%************************************************************************
-%* *
-\subsection[0-22-new-elsewhere]{Other new stuff}
-%* *
-%************************************************************************
-
-We have two new mailing lists about Glasgow Haskell.
-\begin{description}
-\item[glasgow-haskell-users:]
-This list is for GHC users to chat among themselves. Subscribe by
-sending mail to \tr{glasgow-haskell-users-request@dcs.glasgow.ac.uk}.
-Messages for the list go to \tr{glasgow-haskell-users}.
-
-\item[glasgow-haskell-bugs:]
-This used to be an address for some lonely person who received bug
-reports. It is now a mailing list for the sort of people who discuss,
-well, bug reports when they go to a party.
-
-Subscribe via \tr{glasgow-haskell-bugs-request@dcs.glasgow.ac.uk};
-bug reports and ruminations thereupon go to
-\tr{glasgow-haskell-bugs}.
-\end{description}
diff --git a/ghc/docs/release_notes/0-23-notes.lit b/ghc/docs/release_notes/0-23-notes.lit
deleted file mode 100644
index 196592c84b..0000000000
--- a/ghc/docs/release_notes/0-23-notes.lit
+++ /dev/null
@@ -1,253 +0,0 @@
-Release~0.23 is the fifth public release of Glasgow Haskell.
-It incorporates our new work for the second half of 1994.
-
-The announcement for this release is distributed as \tr{ANNOUNCE-0.23}
-in the top-level directory.
-
-%************************************************************************
-%* *
-\subsection[0-23-ports]{What machines GHC~0.23 runs on}
-%* *
-%************************************************************************
-
-NOTE: the porting situation is essentially unchanged between 0.22 and
-0.23, except for adding the native-code generator for DEC Alphas.
-
-We use Sun4s running SunOS~4.1.3 and DEC~Alphas running OSF/1~V2.0, so
-those are the ``fully-supported'' platforms, unsurprisingly. Both
-have native-code generators, for quicker compilations.
-
-The GHC hierarchy of Porting Goodness: (a)~Best is a native-code
-generator; (b)~next best is a ``registerised''
-port; (c)~the bare minimum is an ``unregisterised'' port.
-``Unregisterised'' Haskell programs are much bigger and slower,
-but the port is much easier to get going.
-
-Here's everything that's known about GHC ports, as of 0.23:
-\begin{description}
-%-------------------------------------------------------------------
-\item[Sun4 running SunOS~4.1.3 (\tr{sparc-sun-sunos4}):]
-Fully supported, including native-code generator.
-
-%-------------------------------------------------------------------
-\item[Sun4 running Solaris 2.x (\tr{sparc-sun-solaris2}):]
-Fully supported, including native-code generator.
-(NB: not tested before release.)
-
-%-------------------------------------------------------------------
-\item[DEC Alpha running OSF/1 V2.0 (\tr{alpha-dec-osf1}):]
-Fully supported, including native-code generator.
-
-%-------------------------------------------------------------------
-\item[Sun3 running SunOS~4.1.3 (\tr{m68k-sun-sunos4}):]
-GHC~0.23 works registerised. No native-code generator.
-
-%-------------------------------------------------------------------
-\item[HP-PA box running HP/UX 9.x:]
-An unregisterised port of 0.21 (last ``internal'' release before 0.23)
-seems to work, except that floating-point is definitely busted.
-0.23~should be the same.
-
-%-------------------------------------------------------------------
-\item[Silicon Graphics box running IRIX 5.x:]
-An unregisterised port of 0.21
-seemed to work. 0.23~should be the same.
-
-%-------------------------------------------------------------------
-\item[DECstation (MIPS-based):]
-An unregisterised port back around the time of 0.17 seemed to work;
-0.23~should be the same, modulo a little bit-rot.
-
-%-------------------------------------------------------------------
-\item[x86 PCs running Linux/NetBSD/FreeBSD:]
-This really needs a native-code generator to be viable. No
-recent progress.
-
-%-------------------------------------------------------------------
-\item[GRIP multiprocessor:]
-GRIP is a 68020-based multiprocessor for running parallel Haskell
-programs; too bad we have the only machine! We run GHC~0.16 on it,
-with no plans to upgrade.
-
-We are working on other parallel stuff. Stay tuned.
-
-%-------------------------------------------------------------------
-\item[NeXT box running whatever NeXTs run:]
-Carsten Schultz succeeded with a ``registerised'' port of GHC~0.19.
-There's probably a little bit-rot since then, but otherwise it should
-still be fine. Had a report that things were basically OK at 0.22.
-
-%-------------------------------------------------------------------
-\item[Macintosh, using MPW:]
-As mind-blowing as it may seem, David Wright in Tasmania has actually
-gotten GHC to run on a Macintosh. Ditto James Thomson here at Glasgow.
-You may be able to get Thomson's from here. (Not sure that it will
-excite you to death, but...)
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[0-23-config]{New configuration things in 0.23}
-%* *
-%************************************************************************
-
-Essentially, upgraded to Autoconf~2. Probably the easiest way to see
-what all the options are now is to type \tr{./configure --help} and
-look at the stuff near the end.
-
-%************************************************************************
-%* *
-\subsection[0-23-user-visible]{User-visible changes in 0.23, including incompatibilities}
-%* *
-%************************************************************************
-
-You'll need to recompile everything if you're switching from a
-previous version of GHC. (If you don't, you'll get ``consistency
-errors''.) Some day, we will stop doing this to you :-)
-
-Monadic I/O has undergone yet another shake-up; that is outlined in the
-next section.
-
-To use the 1.3-DRAFT I/O features, you use a \tr{-fhaskell-1.3} flag.
-This also nets you, from your Prelude, the \tr{Maybe} and \tr{Either}
-types, and the functions \tr{thenMaybe}, \tr{curry}, and \tr{uncurry}.
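-
-For example (illustrative command lines only; remember the flag at
-both compile and link time):
-
-\begin{verbatim}
-% ghc -c -fhaskell-1.3 Main.hs
-% ghc -o main -fhaskell-1.3 Main.o
-\end{verbatim}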
-
-The driver supports a heap-and-stack-sizes scaling flag. For example,
-\tr{-Rscale-sizes2} would cause the driver to use twice as much
-heap/stack space as it would otherwise. This is a convenient way to
-move between machines with differing memory setups (e.g., 32-bit vs
-64-bit) without changing millions of -H flags in a Makefile. Note:
-something like \tr{-Rscale-sizes1.5} is OK, too.
-
-``Lit-lit'' literals are now overloaded. They can be any
-\tr{_CCallable} type, not just \tr{_Addrs}. The price of this extra
-convenience is that you sometimes have to insert a type signature.
-
-The shift-right primitive-operation, \tr{shiftR#}, has been renamed
-to the clearer \tr{shiftRA#} (arithmetic). A new prim-op
-\tr{shiftRL#} (logical) has been added.
-
-Comparable shift primitive-ops on \tr{Int#s} (rather than \tr{Word#s})
-have been added: \tr{iShiftL#}, \tr{iShiftRA#}, and \tr{iShiftRL#}.
-Long live high-level languages!
-
-%************************************************************************
-%* *
-\subsection[0-23-io]{New in I/O, esp. ``monadic,'' esp. ``1.3''}
-%* *
-%************************************************************************
-
-GHC~0.23 is still a Haskell~1.2 compiler. Do nothing weird, and it
-should work exactly as before.
-
-If you give GHC a \tr{-fhaskell-1.3} flag (both compile and link time,
-please!), it will use a VERY EARLY, LARGELY UNTESTED implementation of
-the DRAFT 1.3 I/O PROPOSAL.
-
-The \tr{PreludeGlaIO} interface, which was based on a long-ago 1.3 I/O
-proposal, is DEAD. It was in a pretty bad state, anyway.
-Putting \tr{PreludeGlaIO} code through as 1.3 code, I got pretty
-far with just these few impedance-matching definitions:
-\begin{verbatim}
-> type Void = ()
-> returnIO = return
-> thenIO = (>>=)
-> mapIO :: (a -> IO b) -> [a] -> IO [b]
-> mapIO f = accumulate {-was:listIO-} . map f
-\end{verbatim}
-
-We supply the DRAFT 1.3 I/O PROPOSAL in \tr{ghc/docs/io-1.3/}.
-It is in HTML format.
-
-We still give access to our underlying \tr{PrimIO} monad, via the
-\tr{PreludePrimIO} interface. This is the level at which \tr{_ccall_s}
-operate. It should still be quite solid, and counts as a good fall-back
-position when the 1.3-DRAFT stuff dies on you. See the User's Guide.
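-
-Very roughly, that level looks like this (a sketch only; the exact
-types here are guesses, so check \tr{PreludePrimIO.hi} and the User's
-Guide):
-
-\begin{verbatim}
-myGetPid :: PrimIO Int
-myGetPid = _ccall_ getpid
-\end{verbatim}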
-
-%************************************************************************
-%* *
-\subsection[0-23-support]{New in support tools (e.g., profiling)}
-%* *
-%************************************************************************
-
-The reports from profiling should be a bit tidier. The ``automagic''
-cost-centres for, e.g., ``all the CAFs in module X'', will now be
-reported against \tr{CAFs_in_... X}. Which seems fair enough.
-
-GHCI---an INTERPRETER for Glasgow Haskell! The brainchild and work of
-Alastair Reid, before he defected to the Enemy at Yale. Accepts full
-Glasgow Haskell, including many extensions. Can mix interpreted and
-compiled code, the Prelude being a notable case of the latter.
-MASSIVE HACK VALUE! The problem is it doesn't quite compile under
-0.23 (I ran out of time), and some of its dodgy bits (used to mix
-interpreted and compiled code) need upgrading to work with the new
-info tables. It lives in \tr{ghc/compiler} and below, notably the
-\tr{interpreter} subdirectory. Don't be shy now---roll up your
-sleeves and strut your hacking stuff!
-
-%************************************************************************
-%* *
-\subsection[0-23-new-in-compiler]{New in the compiler proper}
-%* *
-%************************************************************************
-
-The compiler is quite a bit faster at compiling, {\em without}
-\tr{-O}. We are in the HBC league now. I don't remember all the
-HACKs that we threw in to make this happen :-)
-
-New-format ``info tables'' (work by Bryan O'Sullivan and friends).
-Essentially, static info used by the garbage-collector has been moved
-one indirection further away, into a ``rep table,'' of which there are
-a fixed number. So the same GC info isn't replicated over and over
-again. This is our main space-savings gain in 0.23.
-
-A native-code generator for the DEC Alpha. Jim Mattson did it in one
-weekend. What a great system!
-
-Rather than have a separate Yacc-parser process that spews a long
-``prefix form'' string into the compiler, the compiler now just does a
-\tr{_ccall_ yyparse} and then walks the resulting parse tree directly.
-(Not quite {\em that} simple, but... still pretty cool.)
-
-A {\em selective} lambda-lifter. (Simon is very excited about its
-selectiveness.) That means it only does lambda-lifting if there is a
-benefit in doing so. It's done on the STG syntax (quite late in the
-compilation).
-
-%************************************************************************
-%* *
-\subsection[0-23-new-in-libraries]{In the prelude and runtime support}
-%* *
-%************************************************************************
-
-PackedStrings (now called \tr{_PackedString}s) are now a built-in
-type, just like \tr{Int}s, say. Their interface is described with the
-Glasgow extensions in the User's Guide. There is also a
-``extensions-free'' interface (no underscores on the front of names)
-which you can get at as a system library (\tr{-syslib ghc}).
-
-The pretty-printing code that we use in GHC is now available in the
-GHC system library (\tr{-syslib ghc} and \tr{import Pretty}). We
-would claim it is more ``industrial strength'' than the one in the HBC
-system library...
-
-Because of name-grabbing by the DRAFT-1.3-I/O, two functions in the
-HBC library's \tr{Parse} module have been renamed: \tr{(>>)} is now
-\tr{act}, and \tr{fail} is now \tr{failP}. (We will change these
-again if Lennart does something differently.)
-
-%************************************************************************
-%* *
-\subsection[0-23-new-elsewhere]{Other new stuff}
-%* *
-%************************************************************************
-
-We've added a new utility, \tr{pphs}, for pretty-printing Haskell code
-in LaTeX documents. It was written by Andrew Preece, a student at
-Glasgow. The code is in \tr{ghc/CONTRIB/pphs}.
-
-Over in literate-land, we've hidden a copy of a slightly-tweaked
-\tr{texi2html} script (in \tr{literate/texi2html/texi2html}). This is
-probably the most painless way to turn ``literate'' things into
-Webbable HTML documents. (Use our literate stuff to make Texinfo
-files, then convert with \tr{texi2html}.) NB: not really tested.
diff --git a/ghc/docs/release_notes/0-26-notes.lit b/ghc/docs/release_notes/0-26-notes.lit
deleted file mode 100644
index b10c7e1c40..0000000000
--- a/ghc/docs/release_notes/0-26-notes.lit
+++ /dev/null
@@ -1,244 +0,0 @@
-Release~0.26 is a major public release of Glasgow Haskell.
-It incorporates our new work for the first half of 1995.
-
-The announcement for this release is distributed as \tr{ANNOUNCE-0.26}
-in the top-level directory.
-
-You'll need to recompile everything if you're switching from a
-previous version of GHC. (If you don't, you'll get ``consistency
-errors''.) Some day, we will stop doing this to you :-)
-
-Information about ``what's ported to which machine'' is now given in
-the Installation Guide.
-The new ports since 0.23 are: \tr{hppa1.1-hp-hpux},
-\tr{i386-*-linuxaout}, and \tr{mips-sgi-irix5}.
-
-%************************************************************************
-%* *
-\subsection[0-26-config]{New configuration things in 0.26}
-%* *
-%************************************************************************
-
-We are moving towards one \tr{configure} script for all Glasgow
-functional-programming tools. Consequently, the configure options
-have been tweaked. Doing \tr{./configure --help} will show you the
-current state of play.
-
-%************************************************************************
-%* *
-\subsection[0-26-user-visible]{User-visible changes in 0.26, including incompatibilities}
-%* *
-%************************************************************************
-
-The names \tr{scc}, \tr{ccall}, and \tr{casm} are no longer stolen
-from the user's name space. (The magical constructs they once were
-have been known as \tr{_scc_}, \tr{_ccall_}, and \tr{_casm_} for some
-time now...)
-
-Similarly, \tr{trace} is no longer built-in (so you can use the name
-if you want to). You can get \tr{trace} either as \tr{_trace}
-(Glasgow extension), or as itself via \tr{import Trace} with
-\tr{-syslib hbc} (i.e., exactly like HBC).
-
-Lazy, or irrefutable, patterns with unboxed-type components are
-no longer allowed. You'll need to rewrite \tr{let (I# x) = exp ...}
-as \tr{let x = case exp of { I# i -> i } in ... }.
-
-GHC now supports hexadecimal and octal numeric syntax for integer constants.
-(But \tr{read} doesn't grok it yet...)
-
-GHC now supports specialised instances (as in HBC); you can write:
-\begin{verbatim}
-instance Eq a => Eq (Foo a) where { ... }
-{-# SPECIALIZE instance Eq (Foo Bool) #-}
-\end{verbatim}
-
-GHC's pragmas for specialised values now have a magical \tr{= blah}
-form, in which you specify the code to be used for the specialised value.
-For example:
-\begin{verbatim}
-f :: Ord a => a -> Int -> a
-{-# SPECIALIZE f :: Double -> Int -> Double = f_Double #-}
-
-f_Double :: Double -> Int -> Double
-f_Double ...
-\end{verbatim}
-In some cases, the \tr{= blah} form can be a {\em big} win.
-
-What we used to call ``threaded'' Haskell, we now call ``Concurrent
-Haskell.'' And there's a paper about it. Please see the User's Guide.
-
-``Parallel Haskell,'' running under PVM, is here. Again, see the
-User's Guide.
-
-%************************************************************************
-%* *
-\subsection[0-26-options]{New or changed GHC command-line options}
-%* *
-%************************************************************************
-
-The \tr{-g}, \tr{-p}, \tr{-pg}, \tr{-fpic}, and \tr{-fPIC} are no
-longer passed straight through to GCC. They probably buy you nothing,
-while potentially causing substantial mischief. If you know what you're doing,
-you can still use them, via \tr{-optcO-...}.
-
-The main option for Concurrent Haskell is \tr{-concurrent}; for
-Parallel Haskell, it's \tr{-parallel}.
-
-The \tr{-dict-all} option, used with \tr{-prof}, has died. It never
-did anything anyway.
-
-Besides the \tr{-fshow-specialisations} option to see what specialisations
-have occurred, you may also use the \tr{-fshow-import-specs} option
-to see what specialisations GHC would like to have had available.
-By then adding these ``desirable'' pragmas to your code, you can
-squash most of the overloading from your program.
-
-There are some new sanity-checking options. Use
-\tr{-fsignatures-required} if you want to force all top-level
-definitions to have type signatures. Use \tr{-fshadowing-not-ok}
-if you want to disallow name shadowing. You can't use the latter on
-modules that include circular definitions.
-
-The \tr{-Rghc-timing} option gives a convenient one-line summary to
-GHC's runtime and heap allocation.
-
-The \tr{-odump} option may be used to save GHC's standard-error output
-to a file. (It normally shows up on your screen.)
-
-You can now use \tr{-H0} and \tr{-K0} to reset the heap and stack
-sizes. As these sizes are normally ``maxxed up'' (\tr{-H32m -H16m}
-gets you a 32MB heap), you can use this form to decrease the size:
-\tr{-H6m -H0 -H250k} gives you a heap of 250KB.
-
-%************************************************************************
-%* *
-\subsection[0-26-io]{New in monadic I/O}
-%* *
-%************************************************************************
-
-GHC~0.26 is still a Haskell~1.2 compiler (and will remain so until
-there is a non-DRAFT 1.3 standard).
-
-We killed the \tr{PreludePrimIO} interface. You can get all the same
-functions from \tr{PreludeGlaST}.
-
-All the \tr{_IVar} and \tr{_MVar} operations are now in the 1.3
-\tr{IO} monad, not the non-standard \tr{PrimIO} monad. You now
-get them from \tr{Concurrent}, not from \tr{PreludeGlaST}.
-
-%************************************************************************
-%* *
-\subsection[0-26-new-in-compiler]{New in the compiler proper}
-%* *
-%************************************************************************
-
-The main new things are ``foldr-build'' deforestation (by Andy Gill)
-and ever-more-glorious specialisation (by Patrick Sansom).
-
-And the usual few megabytes of gratuitous changes.
-
-%************************************************************************
-%* *
-\subsection[0-26-new-in-libraries]{In the prelude and libraries}
-%* *
-%************************************************************************
-
-All of the basic state-transformer stuff now comes from
-\tr{PreludeGlaST}. The \tr{PreludePrimIO} interface no longer exists.
-
-The function \tr{foldrPrimIO} went away. The function \tr{forkPrimIO}
-sprang to life.
-
-The what-you-need-for-Concurrent-Haskell interface is \tr{Concurrent}.
-The GHC option is \tr{-concurrent}. Please see the User's Guide.
-Note that the operations @threadDelay@ and @threadWait@ now come
-from \tr{Concurrent}, not \tr{PreludeGlaMisc}.
-
-I-Vars and M-Vars (synchronising variables) are now
-specifically I/O operations, not general state-transformer operations.
-They also come from the \tr{Concurrent} interface.
-
-Renamings: what used to be the \tr{newMVar} operation is now called
-\tr{newEmptyMVar}; what was \tr{initMVar} is now \tr{newMVar}.
-
-The what-you-need-for-Parallel-Haskell interface is \tr{Parallel}.
-The GHC option is \tr{-parallel}. At the moment, the \tr{Parallel}
-interface just provides \tr{par} and \tr{seq}. But that will change.
-
-\tr{LibPosix} now provides \tr{runProcess}, our candidate for the
-high-level OS-independent operation.
-
-NEW: The \tr{Regex} (\tr{-syslib ghc}) interface provides direct
-access to the GNU regexp (regular expressions on strings) package.
-The \tr{MatchPS} interface is higher-level, providing string-matching
-functions on \tr{_PackedStrings}. (All by Sigbjorn Finne)
-
-NEW: The \tr{Readline} interface (also \tr{-syslib ghc}) provides
-access to the GNU readline package. Instant command-line editing
-for your Haskell programs. (By Darren Moffat)
-
-NEW: A ``network interface toolkit'' by Darren Moffat. BSD sockets
-for Haskell---way cool.
-
-The \tr{FiniteMap} module has two new functions, \tr{isEmptyFM} and
-\tr{elemFM}.
-
-The \tr{Maybes} module now uses the Haskell~1.3 built-in \tr{Maybe}
-type; you should use \tr{-fhaskell-1.3} with this module now.
-
-The HBC library modules \tr{Maybe}, \tr{Either}, and \tr{Option} are
-{\em gone}. Just use \tr{-fhaskell-1.3} and get the types directly
-from the Prelude.
-
-All system-library modules that use the \tr{Maybe} type now require
-\tr{-fhaskell-1.3}. For the GHC library, that means \tr{FiniteMap},
-\tr{Maybes}, \tr{Util}, \tr{Set}, \tr{Regex}, and \tr{MatchPS}. For
-the HBC library, that means \tr{ListUtil}, \tr{Native}, and
-\tr{Parse}. (In some cases, you could avoid the \tr{-fhaskell-1.3}
-requirement by doing selective imports.)
-
-GHC now supports \tr{trace} exactly like HBC: \tr{import Trace} and
-do \tr{-syslib hbc}. The built-in no-import-required version
-is now called \tr{_trace}.
-
-Instances for \tr{Shorts} and \tr{Bytes} have been added to the
-HBC library module \tr{Word}.
-
-As part of the GHC system library, we now provide an interface to the
-GNU regexp (regular-expression) library; the \tr{Regex} interface.
-A higher-level interface, to do matches on \tr{_PackedString}s comes
-from the \tr{MatchPS} interface.
-
-We no longer provide specialisations of Prelude functions to the
-\tr{Float} type; only to \tr{Double}. It saves space, and we want to
-discourage the use of single-precision floating-point.
-
-%************************************************************************
-%* *
-\subsection[0-26-new-in-rts]{In the runtime system}
-%* *
-%************************************************************************
-
-GHC now supplies some very simple ``hooks'' to let you change the
-failure messages for stack and heap overflow, \tr{error}, and
-pattern-matching failure. Please see the User's Guide.
-
-You can now force garbage collection after every N bytes of allocation
-(presumably for stats collection, or something). Use the \tr{-j} RTS
-option.
-
-``Squeezing out'' update frames at garbage-collection time is now {\em
-on} by default. (You can turn it off with the \tr{-Z} RTS option, but
-I can't think why you would want to.)
-
-%************************************************************************
-%* *
-\subsection[0-26-new-elsewhere]{Other new stuff}
-%* *
-%************************************************************************
-
-The GHC distribution now includes an ``examples'' directory, including
-a simple shell (\tr{hsh}) and quite a few to do with 1.3 I/O
-(\tr{ioNNN}) and \tr{LibPosix} (\tr{poNNN}). All in
-\tr{ghc/misc/examples}...
diff --git a/ghc/docs/release_notes/2-01-notes.lit b/ghc/docs/release_notes/2-01-notes.lit
new file mode 100644
index 0000000000..5ac4d4cf80
--- /dev/null
+++ b/ghc/docs/release_notes/2-01-notes.lit
@@ -0,0 +1,202 @@
+Release~2.01 is the first release of Glasgow Haskell for Haskell~1.3.
+It represents a major step forward in GHC development since GHC~0.26
+(July 1995). Note that we are also releasing GHC~0.29, the current
+state-of-play with the Haskell~1.2 compiler---at the same time as
+2.01.
+
+The announcement for this release is distributed as \tr{ANNOUNCE-2.01}
+in the top-level directory. It contains very important caveats about
+2.01, which we do not repeat here!
+
+Information about ``what's ported to which machine'' is in the
+Installation Guide. Since 0.26, we've improved our support for iX86
+(Intel) boxes, notably those running Linux.
+
+%************************************************************************
+%* *
+\subsection[2-01-config]{New configuration things in 2.01}
+%* *
+%************************************************************************
+
+%************************************************************************
+%* *
+\subsection[2-01-user-visible]{User-visible changes in 2.01, including incompatibilities}
+%* *
+%************************************************************************
+
+GHC~2.01 is a compiler for Haskell~1.3 and, as such, introduces many
+user-visible changes. The GHC user's guide has a section to help you
+upgrade your programs to Haskell~1.3; all user-visible changes
+are described there (and not repeated here).
+
+%************************************************************************
+%* *
+\subsection[2-01-options]{New or changed GHC command-line options}
+%* *
+%************************************************************************
+
+A new flag, \tr{-recomp}, invokes the new ``recompilation checker.''
+We recommend that you use it whenever you use `make' to build your
+Haskell programs. Please see the User's Guide for details.
+
+The flags \tr{-fomit-derived-read} and
+\tr{-fomit-reexported-instances} have died; there is no longer any
+need for them.
+
+%************************************************************************
+%* *
+\subsection[2-01-new-in-compiler]{New in the compiler proper}
+%* *
+%************************************************************************
+
+Substantially rewritten. Notable points:
+\begin{itemize}
+\item
+The typechecker, besides doing all the new 1.3
+features (constructor classes, records, etc.), has been made
+ready to do linear types (e.g., there are now ``usage
+variables'' as well as ``type variables'').
+
+\item
+The Core language now has one constructor for lambdas
+(\tr{Lam}; rather than two, \tr{CoLam} and \tr{CoTyLam});
+also, one constructor for applications (\tr{App}, rather
+than two, \tr{CoApp} and \tr{CoTyApp}).
+
+Consequently, new more-general datatypes for binders and
+arguments now exist (\tr{CoreBinder} and \tr{CoreArg},
+respectively).
+
+Again, the Core language is now ``linear types''-ready
+(though the work hasn't been done yet).
+
+A new Core constructor, \tr{Coerce}, exists to support the
+1.3 \tr{newtype} construct.
+
+\item
+The ``renamer''---the part of the compiler that implements
+the Haskell module system---has been completely rewritten.
+
+In part, this is because the 1.3 module system is radically
+changed from 1.2, even if the user is unlikely to notice.
+
+We've implemented the new system with a ``go to the
+horse's mouth'' scheme; that is, to discover the facts about
+an entity \tr{Foo.bar}, we {\em always} go to the interface
+for module \tr{Foo}; hence, we can never get duff information
+about \tr{bar} from some intermediary.
+
+Interface files are no longer mandated by the language, so
+they are completely different in 2.01 compared to 0.2x. They
+will very likely change again. All processing of interface
+files is done in Haskell now (the most likely reason why GHC
+has slowed down :-().
+
+\item
+Much less special pleading for the Prelude. If you wanted
+to write your own Prelude and drop it in, you would have
+a fighting chance now.
+
+\item
+No more `make' dependency loops! (Hooray!) The whole compiler
+will build in one `make' run, no fuss or bother.
+\end{itemize}
+
+%************************************************************************
+%* *
+\subsection[2-01-new-in-libraries]{In the ``required'' libraries (incl. Prelude)}
+%* *
+%************************************************************************
+
+We support standard 1.3 monadic I/O, to the best of our knowledge.
+
+The proposal for \tr{LibPosix} didn't make it into Haskell 1.3 I/O.
+So it's now a system library, \tr{-syslib posix}. (And, of course,
+the \tr{Lib} prefix is gone.)
+
+%************************************************************************
+%* *
+\subsection[2-01-new-in-glaexts]{New in ``Glasgow extensions'' library things}
+%* *
+%************************************************************************
+
+The @PreludeGlaMisc@ and @PreludePrimIO@ interfaces have died.
+Use @PreludeGlaST@ instead.
+
+We don't really know what our interfaces-to-nonstandard-things will
+eventually look like...
+
+@MallocPtr@s are now called @ForeignObj@s.
+
+The @_PackedString@ gunk (with leading underscores) is gone. Just
+\tr{import PackedString} and use ``normal'' names.
+
+All of the following are {\em gone}:
+\begin{verbatim}
+data _FILE -- corresponds to a "FILE *" in C
+
+fclose :: _FILE -> PrimIO Int
+fdopen :: Int -> String -> PrimIO _FILE
+fflush :: _FILE -> PrimIO Int
+fopen :: String -> String -> PrimIO _FILE
+fread :: Int -> Int -> _FILE -> PrimIO (Int, _ByteArray Int)
+freopen :: String -> String -> _FILE -> PrimIO _FILE
+fwrite :: _ByteArray Int -> Int -> Int -> _FILE -> PrimIO Int
+
+appendChanPrimIO :: String -> String -> PrimIO ()
+appendFilePrimIO :: String -> String -> PrimIO ()
+getArgsPrimIO :: PrimIO [String]
+readChanPrimIO :: String -> PrimIO String
+\end{verbatim}
+
+%************************************************************************
+%* *
+\subsection[2-01-new-in-syslibs]{In the ``system'' libraries}
+%* *
+%************************************************************************
+
+The ``system'' libraries are no longer part of GHC (they lived in
+\tr{ghc/lib/}); they have been lifted out into a subsystem in their
+own right (they live in \tr{hslibs}).
+
+Of course, a GHC distribution will ``happen'' to have these libraries
+included; however, we hope the libraries will evolve into a large,
+flourishing, independently-maintained, and independently-distributed
+body of code---preferably compiler-independent, too!
+
+Renamings in the GHC system library (\tr{hslibs/ghc/}): The function
+\tr{BitSet.singletonBS} is now called \tr{unitBS}. Similarly,
+\tr{FiniteMap.singletonFM} is now \tr{unitFM}. \tr{Set.singletonSet}
+lingers briefly; \tr{unitSet} is also available now.
+
+We are {\em not} up-to-date with the HBC library as shipped with
+HBC-for-1.3 (the source hasn't been released yet).
+
+The \tr{Either}, \tr{Maybe}, and \tr{Option} modules in the HBC
+library (\tr{hslibs/hbc/}) have been deleted---they are too close to
+what Haskell~1.3 provides anyway (hence, confusing).
+
+The POSIX support code is in \tr{hslibs/posix}.
+
+We have added a ``contrib'' system library (\tr{hslibs/contrib/});
+made up of code that was contributed to the ``Haskell library'',
+mostly by Stephen Bevan. Quite a bit of code for numerical methods
+in there...
+
+%************************************************************************
+%* *
+\subsection[2-01-new-in-rts]{In the runtime system}
+%* *
+%************************************************************************
+
+We have made a point {\em not} to beat on the runtime system very much.
+Some bugs have been fixed since 0.26, of course.
+
+The GranSim (parallel-machine simulator) stuff is substantially improved
+(but you're better off using the 0.29 version of it).
+
+%************************************************************************
+%* *
+%\subsection[2-01-new-elsewhere]{Other new stuff}
+%* *
+%************************************************************************
diff --git a/ghc/docs/release_notes/Jmakefile b/ghc/docs/release_notes/Jmakefile
index 88109ac206..6b696249f2 100644
--- a/ghc/docs/release_notes/Jmakefile
+++ b/ghc/docs/release_notes/Jmakefile
@@ -3,9 +3,6 @@
* but nothing to "install" it
*/
-LitStuffNeededHere(docs depend)
-InfoStuffNeededHere(docs)
-
LiterateSuffixRules()
DocProcessingSuffixRules()
diff --git a/ghc/docs/release_notes/release.lit b/ghc/docs/release_notes/release.lit
index b98df34d3e..49e92f1c39 100644
--- a/ghc/docs/release_notes/release.lit
+++ b/ghc/docs/release_notes/release.lit
@@ -2,13 +2,13 @@
\documentstyle[11pt,literate]{article}
\begin{document}
\title{Release notes for Glasgow Haskell}
-\author{Will Partain (for the AQUA Team)\\
+\author{The GHC Team\\
Department of Computing Science\\
University of Glasgow\\
Glasgow, Scotland\\
G12 8QQ\\
\\
-Email: glasgow-haskell-\{users,bugs\}-request\@dcs.glasgow.ac.uk}
+Email: glasgow-haskell-\{users,bugs\}-request\@dcs.gla.ac.uk}
\maketitle
\begin{rawlatex}
\tableofcontents
@@ -26,66 +26,35 @@ Email: glasgow-haskell-\{users,bugs\}-request\@dcs.glasgow.ac.uk}
% pointers to it. Mentioning something in the release notes is not
% the same as documenting it.
-\section[release-0-26]{Release notes for version~0.26---7/95}
-\input{0-26-notes.lit}
+\section[release-2-01]{Release notes for version~2.01---7/96}
+\input{2-01-notes.lit}
%\section[release-RSN]{What we hope to do Real Soon Now}
%\downsection
%\input{real-soon-now.lit}
%\upsection
-\section{Non-release notes for versions~0.24 and 0.25}
-Version~0.24 (March 1995) was a tidy-up release; it mostly
-fixed some ``threads'' problems (now ``Concurrent Haskell''),
-some I/O problems, and some porting problems.
+\section{Versions 0.26 through 0.29}
-Version~0.25 was a binary-only dump of a \tr{i386-*-linuxaout}
-build, just so people could try it.
+GHC~0.26 (7/95) was the last major release of GHC for Haskell~1.2.
-\section[release-0-23]{Release notes for version~0.23---12/94}
-\input{0-23-notes.lit}
+GHC~0.27 (12/95) was a ``binary-only from-working-sources
+no-guarantees snapshot ... for i386-unknown-linuxaout and
+i386-unknown-solaris2 platforms...''
-\section[release-0-22]{Release notes for version~0.22---7/94}
-\input{0-22-notes.lit}
+GHC~0.28 (5/96) was the same thing, for the i386-unknown-linux (ELF)
+platform.
-\section[release-0-19]{Release notes for version~0.19---12/93}
-\input{0-19-notes.lit}
+GHC~0.29 (7/96), released at the same time as 2.01, is just ``0.26
+with bug fixes''; i.e., the current state-of-play on the Haskell~1.2
+compiler development.
-\section[release-0-16]{Release notes for version~0.16---07/93}
-\input{0-16-notes.lit}
+\section{Old release notes}
-\section[release-0-10]{Release notes for version~0.10---12/92}
-\input{0-10-notes.lit}
-
-\section[release-0-09]{Release~0.09---9/92}
-
-This was an unannounced pseudo-release to a few people.
-
-\section[release-0-08]{Release notes for version~0.08---7/92}
-\input{0-08-notes.lit}
-
-\section[release-0-07]{Release~0.07}
-
-This was an unannounced pseudo-release to a few people.
-
-\section[release-0-06]{Release notes for version~0.06---3/92}
-\input{0-06-notes.lit}
-
-\section[release-0-05]{Release notes for version~0.05---12/91}
-\input{0-05-notes.lit}
-
-\section[releases-0-03-04]{Releases between 0.02 and 0.05}
-
-There were a couple of private releases to highly zealous people,
-mainly our friends at York. There are README files in the
-\tr{release_notes/} dir about those, if you are really interested.
-
-\section[release-0-02]{Release notes for version~0.02---8/91}
-\downsection
-Nothing about version 0.02, our very first release, is still
-interesting :-)
-%\input{0-02-notes.lit}
-\upsection
+We used to include the release notes back to the dawn of time in this
+document. Made for a nice long document, but it wasn't that
+interesting. If you would like to see old release notes, just ask;
+we've still got 'em.
\begin{onlystandalone}
% \printindex
diff --git a/ghc/docs/simple-monad.lhs b/ghc/docs/simple-monad.lhs
index 82157b34b5..3efb2b9bb0 100644
--- a/ghc/docs/simple-monad.lhs
+++ b/ghc/docs/simple-monad.lhs
@@ -257,7 +257,7 @@ code you're likely to write or see will look like the stuff in here.
Comments, suggestions, etc., to me, please.
Will Partain
-partain@dcs.glasgow.ac.uk
+partain@dcs.gla.ac.uk
% compile with:
% ghc -cpp <other-flags> Foo.lhs
diff --git a/ghc/docs/state_interface/Jmakefile b/ghc/docs/state_interface/Jmakefile
index e69de29bb2..08be0d0a70 100644
--- a/ghc/docs/state_interface/Jmakefile
+++ b/ghc/docs/state_interface/Jmakefile
@@ -0,0 +1,7 @@
+state-interface.dvi : state-interface.tex
+ $(RM) state-interface.dvi
+ latex state-interface.tex
+
+state-interface.tex : state-interface.verb
+ $(RM) state-interface.tex
+ expand state-interface.verb | verbatim > state-interface.tex
diff --git a/ghc/docs/state_interface/state-interface.verb b/ghc/docs/state_interface/state-interface.verb
index c51193aa97..291d5f0109 100644
--- a/ghc/docs/state_interface/state-interface.verb
+++ b/ghc/docs/state_interface/state-interface.verb
@@ -4,7 +4,7 @@
\renewcommand{\dblfloatpagefraction}{0.9}
\sloppy
-
+\renewcommand{\today}{July 1996}
\begin{document}
@@ -15,7 +15,7 @@
\tableofcontents
This ``state interface document'' corresponds to Glasgow Haskell
-version~0.23.
+version~2.01.
\section{Really primitive stuff}
@@ -81,45 +81,45 @@ negateInt# :: Int# -> Int#
@
NB: No error/overflow checking!
-\subsubsection{Unboxed-@Float@ and @Double@ operations}
-@
-{plus,minus,times,divide}Float# :: Float# -> Float# -> Float#
-negateFloat# :: Float# -> Float#
-
-float2Int# :: Float# -> Int# -- just a cast, no checking!
-int2Float# :: Int# -> Float#
-
-expFloat# :: Float# -> Float#
-logFloat# :: Float# -> Float#
-sqrtFloat# :: Float# -> Float#
-sinFloat# :: Float# -> Float#
-cosFloat# :: Float# -> Float#
-tanFloat# :: Float# -> Float#
-asinFloat# :: Float# -> Float#
-acosFloat# :: Float# -> Float#
-atanFloat# :: Float# -> Float#
-sinhFloat# :: Float# -> Float#
-coshFloat# :: Float# -> Float#
-tanhFloat# :: Float# -> Float#
-powerFloat# :: Float# -> Float# -> Float#
-@
-There's an exactly-matching set of unboxed-@Double@ ops; replace
-@Float#@ with @Double#@ in the list above. There are two
+\subsubsection{Unboxed-@Double@ and @Float@ operations}
+@
+{plus,minus,times,divide}Double# :: Double# -> Double# -> Double#
+negateDouble# :: Double# -> Double#
+
+double2Int# :: Double# -> Int# -- just a cast, no checking!
+int2Double# :: Int# -> Double#
+
+expDouble# :: Double# -> Double#
+logDouble# :: Double# -> Double#
+sqrtDouble# :: Double# -> Double#
+sinDouble# :: Double# -> Double#
+cosDouble# :: Double# -> Double#
+tanDouble# :: Double# -> Double#
+asinDouble# :: Double# -> Double#
+acosDouble# :: Double# -> Double#
+atanDouble# :: Double# -> Double#
+sinhDouble# :: Double# -> Double#
+coshDouble# :: Double# -> Double#
+tanhDouble# :: Double# -> Double#
+powerDouble# :: Double# -> Double# -> Double#
+@
+There's an exactly-matching set of unboxed-@Float@ ops; replace
+@Double#@ with @Float#@ in the list above. There are two
coercion functions for @Float#@/@Double#@:
@
float2Double# :: Float# -> Double#
double2Float# :: Double# -> Float#
@
-The primitive versions of @encodeFloat@/@decodeFloat@:
+The primitive versions of @encodeDouble@/@decodeDouble@:
@
-encodeFloat# :: Int# -> Int# -> ByteArray# -- Integer mantissa
+encodeDouble# :: Int# -> Int# -> ByteArray# -- Integer mantissa
-> Int# -- Int exponent
- -> Float#
+ -> Double#
-decodeFloat# :: Float#
- -> _ReturnIntAndGMP
+decodeDouble# :: Double#
+ -> GHCbase.ReturnIntAndGMP
@
-(And the same for @Double#@s.)
+(And the same for @Float#@s.)
\subsection{Operations on/for @Integers@ (interface to GMP)}
\label{sect:horrid-Integer-pairing-types}
@@ -127,6 +127,8 @@ decodeFloat# :: Float#
We implement @Integers@ (arbitrary-precision integers) using the GNU
multiple-precision (GMP) package.
+NB: some of this might change if we upgrade to using GMP~2.x.
+
The data type for @Integer@ must mirror that for @MP_INT@ in @gmp.h@
(see @gmp.info@). It comes out as:
@
@@ -138,11 +140,8 @@ collection of primitive types.
The operations in the GMP return other combinations of
GMP-plus-something, so we need ``pairing'' types for those, too:
@
-type _ReturnGMP = Integer -- synonym
-data _Return2GMPs = _Return2GMPs Int# Int# ByteArray#
- Int# Int# ByteArray#
-data _ReturnIntAndGMP = _ReturnIntAndGMP Int#
- Int# Int# ByteArray#
+data Return2GMPs = Return2GMPs Int# Int# ByteArray# Int# Int# ByteArray#
+data ReturnIntAndGMP = ReturnIntAndGMP Int# Int# Int# ByteArray#
-- ????? something to return a string of bytes (in the heap?)
@
@@ -162,7 +161,7 @@ cmpInteger# :: Int# -> Int# -> ByteArray#
divModInteger#, quotRemInteger#
:: Int# -> Int# -> ByteArray#
-> Int# -> Int# -> ByteArray#
- -> _Return2GMPs
+ -> GHCbase.Return2GMPs
integer2Int# :: Int# -> Int# -> ByteArray#
-> Int#
@@ -298,7 +297,7 @@ object! This is a pain: primitive ops aren't supposed to do
complicated things like enter objects. The current solution is to
return a lifted value, but I don't like it!
@
-indexArray# :: Array# elt -> Int# -> _Lift elt -- Yuk!
+indexArray# :: Array# elt -> Int# -> GHCbase.Lift elt -- Yuk!
@
\subsubsection{The state type}
@@ -314,20 +313,20 @@ code, and allocate no registers etc, for primitive states.
type State# s
@
-The type @_RealWorld@ is truly opaque: there are no values defined
+The type @GHCbuiltins.RealWorld@ is truly opaque: there are no values defined
of this type, and no operations over it. It is ``primitive'' in that
sense---but it is {\em not unboxed!} Its only role in life is to be the type
which distinguishes the @PrimIO@ state transformer (see
Section~\ref{sect:io-spec}).
@
-data _RealWorld
+data RealWorld
@
\subsubsection{States}
-A single, primitive, value of type @State# _RealWorld@ is provided.
+A single, primitive, value of type @State# RealWorld@ is provided.
@
-realWorld# :: State# _RealWorld
+realWorld# :: State# GHCbuiltins.RealWorld
@
(Note: in the compiler, not a @PrimOp@; just a mucho magic @Id@.)
@@ -455,9 +454,9 @@ The @makeStablePointer@ function converts a value into a stable pointer.
It is part of the @PrimIO@ monad, because we want to be sure we don't
allocate one twice by accident, and then only free one of the copies.
@
-makeStablePointer# :: a -> State# _RealWorld -> StateAndStablePtr# _RealWorld a
-freeStablePointer# :: StablePtr# a -> State# _RealWorld -> State# _RealWorld
-deRefStablePointer# :: StablePtr# a -> State# _RealWorld -> StateAndPtr _RealWorld a
+makeStablePointer# :: a -> State# RealWorld -> StateAndStablePtr# RealWorld a
+freeStablePointer# :: StablePtr# a -> State# RealWorld -> State# RealWorld
+deRefStablePointer# :: StablePtr# a -> State# RealWorld -> StateAndPtr RealWorld a
@
There is also a C procedure @FreeStablePtr@ which frees a stable pointer.
@@ -466,20 +465,20 @@ There is also a C procedure @FreeStablePtr@ which frees a stable pointer.
%
\subsubsection{Foreign objects}
-A \tr{ForeignObj} is a reference to an object outside the Haskell
+A @ForeignObj@ is a reference to an object outside the Haskell
world (i.e., from the C world, or a reference to an object on another
machine completely.), where the Haskell world has been told ``Let me
know when you're finished with this ...''.
-The \tr{ForeignObj} type is just a special @Addr#@ ({\em not} parameterised).
+The @ForeignObj@ type is just a special @Addr#@ ({\em not} parameterised).
@
type ForeignObj#
@
-A typical use of \tr{ForeignObj} is in constructing Haskell bindings
+A typical use of @ForeignObj@ is in constructing Haskell bindings
to external libraries. A good example is that of writing a binding to
an image-processing library (which was actually the main motivation
-for implementing \tr{ForeignObj}'s precursor, \tr{MallocPtr}). The
+for implementing @ForeignObj@'s precursor, @MallocPtr@). The
images manipulated are not stored in the Haskell heap, either because
the library insists on allocating them internally or we (sensibly)
decide to spare the GC from having to heave heavy images around.
@@ -487,12 +486,12 @@ decide to spare the GC from having to heave heavy images around.
@
data Image = Image ForeignObj#
-instance _CCallable Image
+instance CCallable Image
@
-The \tr{ForeignObj#} type is then used to refer to the externally
+The @ForeignObj#@ type is then used to refer to the externally
+allocated image, and to achieve some type safety, the Haskell binding
-defines the @Image@ data type. So, a value of type \tr{ForeignObj#} is
+defines the @Image@ data type. So, a value of type @ForeignObj#@ is
used to ``box'' up an external reference into a Haskell heap object
that we can then indirectly reference:
@@ -500,23 +499,23 @@ that we can then indirectly reference:
createImage :: (Int,Int) -> PrimIO Image
@
-So far, this looks just like an @Addr#@ type, but \tr{ForeignObj#}
+So far, this looks just like an @Addr#@ type, but @ForeignObj#@
offers a bit more, namely that we can specify a {\em finalisation
-routine} to invoke when the \tr{ForeignObj#} is discarded by the
+routine} to invoke when the @ForeignObj#@ is discarded by the
GC. The garbage collector invokes the finalisation routine associated
-with the \tr{ForeignObj#}, saying `` Thanks, I'm through with this
+with the @ForeignObj#@, saying ``Thanks, I'm through with this
now.'' For the image-processing library, the finalisation routine could
free up the memory allocated for the images. The finalisation routine
currently has to be written in C (it can in turn call
@FreeStablePtr@ to deallocate a stable pointer).
Associating a finalisation routine with an external object is done by
-\tr{makeForeignObj#}:
+@makeForeignObj#@:
@
makeForeignObj# :: Addr# -- foreign reference
-> Addr# -- pointer to finalisation routine
- -> StateAndForeignObj# _RealWorld ForeignObj#
+ -> StateAndForeignObj# RealWorld ForeignObj#
@
(Implementation: a linked list of all @ForeignObj#@s is maintained to allow the
@@ -568,11 +567,11 @@ of space recovered.
\subsection{The @errorIO#@ primitive operation}
-The @errorIO#@ primitive takes an argument of type @PrimIO@. It aborts execution of
-the current program, and continues instead by performing the given @PrimIO@ value
+The @errorIO#@ primitive takes an argument whose type is much like @PrimIO@. It aborts execution of
+the current program, and continues instead by performing the given @PrimIO@-like value
on the current state of the world.
@
-errorIO# :: PrimIO () -> a
+errorIO# :: (State RealWorld -> ((), State RealWorld)) -> a
@
\subsection{C Calls}
@@ -583,7 +582,7 @@ argument not last argument.
The @ccall#@ primitive can't be given an ordinary type, because it has
a variable number of arguments. The nearest we can get is:
@
-ccall# :: CRoutine -> a1# -> ... -> an# -> State# _RealWorld -> StateAndR# _RealWorld
+ccall# :: CRoutine -> a1# -> ... -> an# -> State# RealWorld -> StateAndR# RealWorld
@
where the type variables @a1#@\ldots@an#@ and @r#@ can be instantiated by any
primitive type, and @StateAndR#@ is the appropriate pairing type from
@@ -601,7 +600,7 @@ identifier. The only way it is possible to generate a @ccall#@ is via the
All this applies equally to @casm#@:
@
-casm# :: CAsmString -> a1# -> ... -> an# -> State# _RealWorld -> StateAndR# _RealWorld
+casm# :: CAsmString -> a1# -> ... -> an# -> State# RealWorld -> StateAndR# RealWorld
@
%------------------------------------------------------------
@@ -614,50 +613,50 @@ casm# :: CAsmString -> a1# -> ... -> an# -> State# _RealWorld -> StateAndR# _Re
A state transformer is a function from a state to a pair of a result and a new
state.
@
-type _ST s a = _State s -> (a, _State s)
+newtype ST s a = ST (State s -> (a, State s))
@
-The @_ST@ type is {\em abstract}, so that the programmer cannot see its
+The @ST@ type is {\em abstract}, so that the programmer cannot see its
representation. If he could, he could write bad things like:
@
-bad :: _ST s a
-bad = \s -> ...(f s)...(g s)...
+bad :: ST s a
+bad = ST $ \ s -> ...(f s)...(g s)...
@
Here, @s@ is duplicated, which would be bad news.
A state is represented by a primitive state value, of type @State# s@,
-wrapped up in a @_State@ constructor. The reason for boxing it in this
+wrapped up in a @State@ constructor. The reason for boxing it in this
way is so that we can be strict or lazy in the state. (Remember, all
primitive types are unboxed, and hence can't be bottom; but types built
with @data@ are all boxed.)
@
-data _State s = S# (State# s)
-@
+data State s = S# (State# s)
+@
\subsubsection{The state transformer combinators}
-Now for the combinators, all of which live inside the @_ST@
+Now for the combinators, all of which live inside the @ST@
abstraction. Notice that @returnST@ and @thenST@ are lazy in the
state.
@
-returnST :: a -> _ST s a
+returnST :: a -> ST s a
returnST a s = (a, s)
-thenST :: _ST s a -> (a -> _ST s b) -> _ST s b
+thenST :: ST s a -> (a -> ST s b) -> ST s b
thenST m k s = let (r,new_s) = m s
in
k r new_s
-fixST :: (a -> _ST s a) -> _ST s a
+fixST :: (a -> ST s a) -> ST s a
fixST k s = let ans = k r s
(r,new_s) = ans
in
ans
@
-The interesting one is, of course, @_runST@. We can't infer its type!
+The interesting one is, of course, @runST@. We can't infer its type!
(It has a funny name because it must be wired into the compiler.)
@
--- _runST :: forall a. (forall s. _ST s a) -> a
-_runST m = case m (S# realWorld#) of
+-- runST :: forall a. (forall s. ST s a) -> a
+runST m = case m (S# realWorld#) of
(r,_) -> r
@
@@ -668,7 +667,7 @@ fundamental combinators above. The @seqST@ combinator is like
@thenST@, except that it discards the result of the first state
transformer:
@
-seqST :: _ST s a -> _ST s b -> _ST s b
+seqST :: ST s a -> ST s b -> ST s b
seqST m1 m2 = m1 `thenST` (\_ -> m2)
@
@@ -687,7 +686,7 @@ seqStrictlyST m k = ... ditto, for seqST ...
The combinator @listST@ takes a list of state transformers, and
composes them in sequence, returning a list of their results:
@
-listST :: [_ST s a] -> _ST s [a]
+listST :: [ST s a] -> ST s [a]
listST [] = returnST []
listST (m:ms) = m `thenST` \ r ->
listST ms `thenST` \ rs ->
@@ -696,13 +695,13 @@ listST (m:ms) = m `thenST` \ r ->
The @mapST@ combinator ``lifts'' a function from a value to state
transformers to one which works over a list of values:
@
-mapST :: (a -> _ST s b) -> [a] -> _ST s [b]
+mapST :: (a -> ST s b) -> [a] -> ST s [b]
mapST f ms = listST (map f ms)
@
The @mapAndUnzipST@ combinator is similar to @mapST@, except that here the
function returns a pair:
@
-mapAndUnzipST :: (a -> _ST s (b,c)) -> [a] -> _ST s ([b],[c])
+mapAndUnzipST :: (a -> ST s (b,c)) -> [a] -> ST s ([b],[c])
mapAndUnzipST f (m:ms)
= f m `thenST` \ ( r1, r2) ->
mapAndUnzipST f ms `thenST` \ (rs1, rs2) ->
@@ -713,15 +712,15 @@ mapAndUnzipST f (m:ms)
\label{sect:io-spec}
The @PrimIO@ type is defined in as a state transformer which manipulates the
-@_RealWorld@.
+@RealWorld@.
@
-type PrimIO a = _ST _RealWorld a -- Transparent
+type PrimIO a = ST RealWorld a -- Transparent
@
The @PrimIO@ type is an ordinary type synonym, transparent to the programmer.
-The type @_RealWorld@ and value @realWorld#@ do not need to be hidden (although
+The type @RealWorld@ and value @realWorld#@ do not need to be hidden (although
there is no particular point in exposing them). Even having a value of type
-@realWorld#@ does not compromise safety, since the type @_ST@ is hidden.
+@realWorld#@ does not compromise safety, since the type @ST@ is hidden.
It is type-correct to use @returnST@ in an I/O context, but it is a
bit more efficient to use @returnPrimIO@. The latter is strict in the
@@ -766,11 +765,11 @@ mapAndUnzipPrimIO f (m:ms)
\subsubsection{Types}
@
-data Array ix elt = _Array (ix,ix) (Array# elt)
-data _ByteArray ix = _ByteArray (ix,ix) ByteArray#
+data Array ix elt = Array (ix,ix) (Array# elt)
+data ByteArray ix = ByteArray (ix,ix) ByteArray#
-data _MutableArray s ix elt = _MutableArray (ix,ix) (MutableArray# s elt)
-data _MutableByteArray s ix = _MutableByteArray (ix,ix) (MutableByteArray# s)
+data MutableArray s ix elt = MutableArray (ix,ix) (MutableArray# s elt)
+data MutableByteArray s ix = MutableByteArray (ix,ix) (MutableByteArray# s)
@
\subsubsection{Operations on immutable arrays}
@@ -779,14 +778,14 @@ Ordinary array indexing is straightforward.
@
(!) :: Ix ix => Array ix elt -> ix -> elt
@
-QUESTIONs: should @_ByteArray@s be indexed by Ints or ix? With byte offsets
+QUESTIONs: should @ByteArray@s be indexed by Ints or ix? With byte offsets
or sized ones? (sized ones [WDP])
@
-indexCharArray :: Ix ix => _ByteArray ix -> ix -> Char
-indexIntArray :: Ix ix => _ByteArray ix -> ix -> Int
-indexAddrArray :: Ix ix => _ByteArray ix -> ix -> _Addr
-indexFloatArray :: Ix ix => _ByteArray ix -> ix -> Float
-indexDoubleArray :: Ix ix => _ByteArray ix -> ix -> Double
+indexCharArray :: Ix ix => ByteArray ix -> ix -> Char
+indexIntArray :: Ix ix => ByteArray ix -> ix -> Int
+indexAddrArray :: Ix ix => ByteArray ix -> ix -> Addr
+indexFloatArray :: Ix ix => ByteArray ix -> ix -> Float
+indexDoubleArray :: Ix ix => ByteArray ix -> ix -> Double
@
@Addr@s are indexed straightforwardly by @Int@s. Unlike the primitive
operations, though, the offsets assume that the array consists entirely of the
@@ -805,45 +804,45 @@ indexStaticArray :: Addr -> Int -> Addr
\subsubsection{Operations on mutable arrays}
@
-newArray :: Ix ix => (ix,ix) -> elt -> _ST s (_MutableArray s ix elt)
-newCharArray :: Ix ix => (ix,ix) -> _ST s (_MutableByteArray s ix)
+newArray :: Ix ix => (ix,ix) -> elt -> ST s (MutableArray s ix elt)
+newCharArray :: Ix ix => (ix,ix) -> ST s (MutableByteArray s ix)
...
@
@
-readArray :: Ix ix => _MutableArray s ix elt -> ix -> _ST s elt
-readCharArray :: Ix ix => _MutableByteArray s ix -> ix -> _ST s Char
+readArray :: Ix ix => MutableArray s ix elt -> ix -> ST s elt
+readCharArray :: Ix ix => MutableByteArray s ix -> ix -> ST s Char
...
@
@
-writeArray :: Ix ix => _MutableArray s ix elt -> ix -> elt -> _ST s ()
-writeCharArray :: Ix ix => _MutableByteArray s ix -> ix -> Char -> _ST s ()
+writeArray :: Ix ix => MutableArray s ix elt -> ix -> elt -> ST s ()
+writeCharArray :: Ix ix => MutableByteArray s ix -> ix -> Char -> ST s ()
...
@
@
-freezeArray :: Ix ix => _MutableArray s ix elt -> _ST s (Array ix elt)
-freezeCharArray :: Ix ix => _MutableByteArray s ix -> _ST s (_ByteArray ix)
+freezeArray :: Ix ix => MutableArray s ix elt -> ST s (Array ix elt)
+freezeCharArray :: Ix ix => MutableByteArray s ix -> ST s (ByteArray ix)
...
@
We have no need of one-function-per-type for unsafe freezing:
@
-unsafeFreezeArray :: Ix ix => _MutableArray s ix elt -> _ST s (Array ix elt)
-unsafeFreezeByteArray :: Ix ix => _MutableByteArray s ix -> _ST s (_ByteArray ix)
+unsafeFreezeArray :: Ix ix => MutableArray s ix elt -> ST s (Array ix elt)
+unsafeFreezeByteArray :: Ix ix => MutableByteArray s ix -> ST s (ByteArray ix)
@
Sometimes we want to snaffle the bounds of one of these beasts:
@
-boundsOfArray :: Ix ix => _MutableArray s ix elt -> (ix, ix)
-boundsOfByteArray :: Ix ix => _MutableByteArray s ix -> (ix, ix)
+boundsOfArray :: Ix ix => MutableArray s ix elt -> (ix, ix)
+boundsOfByteArray :: Ix ix => MutableByteArray s ix -> (ix, ix)
@
Lastly, ``equality'':
@
-sameMutableArray :: _MutableArray s ix elt -> _MutableArray s ix elt -> Bool
-sameMutableByteArray :: _MutableByteArray s ix -> _MutableByteArray s ix -> Bool
+sameMutableArray :: MutableArray s ix elt -> MutableArray s ix elt -> Bool
+sameMutableByteArray :: MutableByteArray s ix -> MutableByteArray s ix -> Bool
@
@@ -854,14 +853,14 @@ sameMutableByteArray :: _MutableByteArray s ix -> _MutableByteArray s ix -> Bool
Mutable variables are (for now anyway) implemented as arrays. The @MutableVar@ type
is opaque, so we can change the implementation later if we want.
@
-type MutableVar s a = _MutableArray s Int a
+type MutableVar s a = MutableArray s Int a
@
\subsubsection{Operations}
@
-newVar :: a -> _ST s (MutableVar s a)
-readVar :: MutableVar s a -> _ST s a
-writeVar :: MutableVar s a -> a -> _ST s ()
+newVar :: a -> ST s (MutableVar s a)
+readVar :: MutableVar s a -> ST s a
+writeVar :: MutableVar s a -> a -> ST s ()
sameVar :: MutableVar s a -> MutableVar s a -> Bool
@
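+
+For example, here is a small sketch (not from the library itself) of a
+mutable variable used with the combinators defined earlier:
+@
+tick :: Int
+tick = runST (
+         newVar (0::Int)    `thenST` \ v ->
+         readVar v          `thenST` \ n ->
+         writeVar v (n+1)   `thenST` \ _ ->
+         readVar v )
+-- tick evaluates to 1
+@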
@@ -869,19 +868,19 @@ sameVar :: MutableVar s a -> MutableVar s a -> Bool
Nothing exciting here, just simple boxing up.
@
-data _StablePtr a = _StablePtr (StablePtr# a)
+data StablePtr a = StablePtr (StablePtr# a)
-makeStablePointer :: a -> _StablePtr a
-freeStablePointer :: _StablePtr a -> PrimIO ()
+makeStablePointer :: a -> StablePtr a
+freeStablePointer :: StablePtr a -> PrimIO ()
@
\subsection{Foreign objects}
Again, just boxing up.
@
-data _ForeignObj = _ForeignObj ForeignObj#
+data ForeignObj = ForeignObj ForeignObj#
-makeForeignObj :: _Addr -> _Addr -> PrimIO _ForeignObj
+makeForeignObj :: Addr -> Addr -> PrimIO ForeignObj
@
\subsection{C calls}
@@ -919,34 +918,34 @@ data Screen = S# CHeapPtr#
There are other types that can be passed to C (C-callable). This
table summarises (including the standard boxed-primitive types):
@
-Boxed Type of transferd Corresp. Which is
-Type Prim. component C type *probably*...
------- --------------- ------ -------------
-Char Char# StgChar unsigned char
-Int Int# StgInt long int
-_Word Word# StgWord unsigned long int
-_Addr Addr# StgAddr char *
-Float Float# StgFloat float
-Double Double# StgDouble double
-
-Array Array# StgArray StgPtr
-_ByteArray ByteArray# StgByteArray StgPtr
-_MutableArray MutableArray# StgArray StgPtr
-_MutableByteArray MutableByteArray# StgByteArray StgPtr
-
-_State State# nothing!
-
-_StablePtr StablePtr# StgStablePtr StgPtr
-_ForeignObj ForeignObj# StgForeignObj StgPtr
+Boxed Type of transferred Corresp. Which is
+Type Prim. component C type *probably*...
+------ --------------- ------ -------------
+Char Char# StgChar unsigned char
+Int Int# StgInt long int
+Word Word# StgWord unsigned long int
+Addr Addr# StgAddr char *
+Float Float# StgFloat float
+Double Double# StgDouble double
+
+Array Array# StgArray StgPtr
+ByteArray ByteArray# StgByteArray StgPtr
+MutableArray MutableArray# StgArray StgPtr
+MutableByteArray MutableByteArray# StgByteArray StgPtr
+
+State State# nothing!
+
+StablePtr StablePtr# StgStablePtr StgPtr
+ForeignObj ForeignObj# StgForeignObj StgPtr
@
All of the above are {\em C-returnable} except:
@
- Array, _ByteArray, _MutableArray, _MutableByteArray
+ Array, ByteArray, MutableArray, MutableByteArray
@
-{\bf ToDo:} I'm pretty wary of @Array@ and @_MutableArray@ being in
-this list, and not too happy about @_State@ [WDP].
+{\bf ToDo:} I'm pretty wary of @Array@ and @MutableArray@ being in
+this list, and not too happy about @State@ [WDP].
{\bf ToDo:} Can the code generator pass all the primitive types? Should this be
extended to include {\tt Bool\/} (or any enumeration type?)
@@ -993,29 +992,24 @@ are stored on the heap.
@
data Char = C# Char#
data Int = I# Int#
-data _Word = W# Word#
-data _Addr = A# Addr#
+data Word = W# Word#
+data Addr = A# Addr#
data Float = F# Float#
data Double = D# Double#
data Integer = J# Int# Int# ByteArray#
-- and the other boxed-primitive types:
- Array, _ByteArray, _MutableArray, _MutableByteArray,
- _StablePtr, _ForeignObj
+ Array, ByteArray, MutableArray, MutableByteArray,
+ StablePtr, ForeignObj
data Bool = False | True
-data CMP_TAG# = LT# | EQ# | GT# -- used in derived comparisons
+data Ordering = LT | EQ | GT -- used in derived comparisons
data List a = [] | a : (List a)
-- tuples...
-data Ratio a = a :% a
-type Rational = Ratio Integer
-
-data {Request,Response,etc} -- so we can check the type of "main"
-
-data _Lift a = _Lift a -- used Yukkily as described elsewhere
+data Lift a = Lift a -- used Yukkily as described elsewhere
type String = [Char] -- convenience, only
@
@@ -1032,16 +1026,13 @@ for you, and if you don't use any weird flags (notably
Haskell report says, and the full user namespaces should be available
to you.
-Exception: until we burn in the new names @_scc_@ and @_ccall_@, the
-names @scc@ and @ccall@ are still available.
-
\subsection{If you mess about with @import Prelude@...}
-Innocent renaming and hiding, e.g.,
+Innocent hiding, e.g.,
@
-import Prelude hiding ( fromIntegral ) renaming (map to mop)
+import Prelude hiding ( fromIntegral )
@
-should work just fine (even it {\em is} atrocious programming practice).
+should work just fine.
There are some things you can do that will make GHC crash, e.g.,
hiding a standard class:
@@ -1059,13 +1050,5 @@ It is possible that some name conflicts between your code and the
wired-in things might spring to life (though we doubt it...).
Change your names :-)
-\subsection{@import PreludeGlaST@}
-
-@
-type ST s a = _ST s a -- so you don't need -fglasgow-exts...
-@
-
-\subsection{@import PreludeGlaMisc@}
-
\end{document}
diff --git a/ghc/docs/users_guide/Jmakefile b/ghc/docs/users_guide/Jmakefile
index 018e0e1b2d..e2fa72bd5d 100644
--- a/ghc/docs/users_guide/Jmakefile
+++ b/ghc/docs/users_guide/Jmakefile
@@ -1,6 +1,3 @@
-LitStuffNeededHere(docs depend)
-InfoStuffNeededHere(docs)
-
LiterateSuffixRules()
DocProcessingSuffixRules()
diff --git a/ghc/docs/users_guide/backwards.lit b/ghc/docs/users_guide/backwards.lit
new file mode 100644
index 0000000000..cdd450d524
--- /dev/null
+++ b/ghc/docs/users_guide/backwards.lit
@@ -0,0 +1,279 @@
+%************************************************************************
+%* *
+\section[backwards]{Backwards compatibility: Converting from GHC 0.xx and Haskell~1.2}
+\index{GHC vs the Haskell 1.2 language}
+\index{Haskell 1.2 language vs GHC}
+%* *
+%************************************************************************
+
+This part of the guide is to help people upgrading from a
+previous version of GHC. Right now, it is mostly to help people
+switching from GHC~0.26 (a Haskell~1.2 compiler, mostly) to GHC~2.01
+(a Haskell~1.3 compiler).
+
+%ToDo: index
+
+If you need to maintain Haskell code that will work for multiple
+versions of GHC, you can use the \tr{-cpp} flag and the
+\tr{__GLASGOW_HASKELL__} pre-processor variable.
+
+For example, in GHC~0.26, \tr{__GLASGOW_HASKELL__} will be 26; for~2.01,
+it will be 201. Thus, you can write:
+\begin{verbatim}
+#if __HASKELL1__ <= 2
+main = appendChan stdout "Hello, world!\n" exit done -- 1.2
+#else
+# if __GLASGOW_HASKELL__ >= 200
+import IO
+main = putStr "Hello, world!" -- real 1.3
+# else
+main = putStr "Hello, world!\n" -- pseudo-1.3 in 0.2x
+# endif
+#endif
+\end{verbatim}
+
+%************************************************************************
+%* *
+\subsection{Types}
+%* *
+%************************************************************************
+
+A big new thing in Haskell~1.3 is constructor classes. Humble old functions
+such as @map@ now have an exciting new type:
+\begin{verbatim}
+map :: Functor f => (a->b) -> f a -> f b
+\end{verbatim}
+These new overloadings, especially where it's the type constructor
+that's overloaded (as in @map@) can give rise to some puzzling error
+messages. For example:
+\begin{code}
+ lookupColor :: String -> [(String, (a, b, c))] -> (a, b, c)
+ lookupColor colorName colorTable =
+ head [(r,g,b) | (c,(r,g,b)) <- colorTable, c == map toLower colorName]
+\end{code}
+With the type signature this is fine, but if you omit the type signature
+you'll get:
+\begin{verbatim}
+ "Color.hs", line 49: No instance for: Prelude.Eq (a{-a18d-} Prelude.Char)
+ "Color.hs", line 49:
+ at a use of an overloaded identifier: `Prelude.meth.Prelude.Eq.=='
+\end{verbatim}
+@map@ no longer says that @colorName@ has to be a list; it could
+be any type of the form (t @Char@). Unfortunately, @lookupColor@ has to
+take equality over these (t @Char@) things, so it gets stuck trying to figure
+out how to resolve (@Eq@ (t @Char@)) when it knows nothing about @t@.
+
+The solution to such messages is to add type signatures.
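+
+For instance (a minimal sketch of the fix), pinning down the list type
+at the offending use is enough to resolve the overloading:
+\begin{verbatim}
+ lookupColor colorName colorTable =
+   head [(r,g,b) | (c,(r,g,b)) <- colorTable,
+                   c == map toLower (colorName :: String)]
+\end{verbatim}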
+
+%************************************************************************
+%* *
+\subsection{Lexical matters}
+%* *
+%************************************************************************
+
+Old uses of `compose' (\tr{(.)}) can magically turn into qualified
+names; e.g., \tr{LiteralInt.leStringToInt}; add spaces around the dot.
+
+Leading-underscore names (a Glasgow extension) don't work anymore,
+even with \tr{-fglasgow-exts}.
+
+The \tr{Prelude.lex} function doesn't grok Haskell comments any more
+(a good simplification, but a change nonetheless).
+
+%************************************************************************
+%* *
+\subsection{Expressions and patterns}
+%* *
+%************************************************************************
+
+You used to be able to define non-binary functions in a pseudo-infix-y
+way; e.g., \tr{(a `op` b) c = ...}. Illegal now.
+
+New keyword: \tr{do}. Any old variables named \tr{do} will now cause
+syntax errors.
+
+%************************************************************************
+%* *
+\subsection{Converting @Dialogue@ I/O}
+%* *
+%************************************************************************
+
+In most cases, it's really easy to convert from Haskell~1.2's I/O to
+the monadic I/O of Haskell~1.3; a small before-and-after sketch follows
+the list below.
+
+\begin{enumerate}
+\item
+The type \tr{Dialogue} usually can become \tr{IO ()}.
+\item
+Change \tr{readChan stdin} to \tr{getContents}.
+\item
+Change \tr{appendChan stdout} to \tr{putStr}, which is equivalent to
+\tr{hPutStr stdout}.
+Change \tr{appendChan stderr} to \tr{hPutStr stderr}.
+\item
+You need to \tr{import IO} to do any interesting I/O; in particular,
+anything to do with @Handle@s.
+\item
+You need to \tr{import System} if you used @getArgs@, @getEnv@,
+or @getProgName@.
+\item
+Assuming continuation-style @Dialogue@ code, change \tr{... exit done $}
+to \tr{... >>}. Change \tr{... exit $ \ foo ->} to \tr{... >>= \ foo ->}.
+\item
+Sometimes, when you change your \tr{main} routine to ``do'' notation,
+you'll get an error message like:
+\begin{verbatim}
+"Main.hs", line 32: No instance for: Prelude.MonadZero IO
+ "Main.hs", line 32: in a do statement
+\end{verbatim}
+This probably means that you need to ``twiddle'' some patterns; e.g.,
+I added the twiddle to the \tr{getArgs}-related pattern here:
+\begin{verbatim}
+main = do
+ ~[a1] <- getArgs
+ let x = fst (head ((reads::ReadS Int) a1))
+ putStr (show (result x))
+\end{verbatim}
+\item
+If you had any functions named \tr{(>>)}, \tr{(>>=)}, or \tr{return},
+change them to something else.
+\item
+If you used the \tr{StatusFile} I/O request, do something else. No
+exact equivalent exists in 1.3.
+\end{enumerate}
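+
+Putting the steps above together, here is a small before-and-after
+sketch (the program itself is made up, but its shape is typical):
+\begin{verbatim}
+-- Haskell 1.2 (Dialogue style): reverse the lines of stdin
+main :: Dialogue
+main = readChan stdin exit $ \ input ->
+       appendChan stdout (unlines (reverse (lines input))) exit done
+
+-- Haskell 1.3 (monadic style):
+main :: IO ()
+main = do
+    input <- getContents
+    putStr (unlines (reverse (lines input)))
+\end{verbatim}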
+
+%************************************************************************
+%* *
+\subsection{Converting from pre-1.3 monadic I/O}
+%* *
+%************************************************************************
+
+GHC~0.26 supported an early DRAFT of the Haskell~1.3 monadic I/O
+facilities. Inevitably, what Really Made It into 1.3 is not quite
+what was in the draft.
+
+What was called \tr{handle} in the draft is now called \tr{catch}.
+
+The type of the function \tr{fail} changed between draft and real-thing.
+Old: \tr{fail x}; new: \tr{fail (userError x)}.
+Also, what used to be \tr{failWith x} is now just \tr{fail x}.
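+
+A few lines' worth of the typical renamings (a sketch only; names like
+\tr{recover} are made up):
+\begin{verbatim}
+-- GHC 0.26 draft I/O:            -- Haskell 1.3:
+m `handle` \ err -> recover err   m `catch` \ err -> recover err
+fail "it broke"                   fail (userError "it broke")
+failWith err                      fail err
+\end{verbatim}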
+
+The function \tr{try} didn't make it into 1.3 I/O. GHC supplies it
+(at least for now) as @GHCio.tryIO@.
+
+All the system modules named \tr{LibSomething} dropped the \tr{Lib}.
+So: \tr{LibSystem} is now just \tr{System}.
+
+In~0.26, you could mix-n-match @IO@ with @PrimIO@, simply because the
+implementation happened to allow it. Not any more.
+
+The \tr{IOError} type is now abstract; you cannot see its
+constructors. 1.3 provides functions to query errors.
+
+%************************************************************************
+%* *
+\subsection{Use of the standard Prelude}
+%* *
+%************************************************************************
+
+As with any previous Prelude/standard-modules changes, if you have
+top-level functions that name-clash with imported entities, you'll get
+compiler errors. So, for example, if your code defines a function
+called \tr{seq} (entirely cool in Haskell~1.2), you will now get a
+compiler error because there is now a Prelude entity called \tr{seq}.
+(You may, of course, \tr{import Prelude hiding (seq)}, if you wish).
+
+Names that frequently clash with new Prelude names:
+\tr{fail},
+\tr{lookup},
+\tr{seq}.
+
+Add \tr{import Ratio} if you use \tr{Rationals} at all.
+Ditto: \tr{import Complex} if you use complex numbers.
+Ditto: \tr{import Array} if you use arrays.
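+
+So a converted module might begin something like this (illustrative only;
+adjust to taste):
+\begin{verbatim}
+module Foo where
+
+import Prelude hiding (lookup) -- this module defines its own lookup
+import Array                   -- we use arrays
+import List (genericTake)      -- overloaded take/drop live here now
+\end{verbatim}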
+
+As suggested above, any \tr{LibXXX} system modules are now just
+\tr{XXX}.
+
+Also: note that
+arrays now use ordinary pairs, rather than a separate \tr{Assoc} type.
+In some modules, we've found it easier to do:
+\begin{verbatim}
+infix 1 =:
+(=:) a b = (a,b)
+\end{verbatim}
+and globally replace @:=@ with @=:@.
+Works fine for expressions; useless for patterns, however.
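+
+For instance (a made-up fragment), an array that used to be built as
+\begin{verbatim}
+array (1,3) [1 := 'a', 2 := 'b', 3 := 'c']
+\end{verbatim}
+is now built (after \tr{import Array}) as
+\begin{verbatim}
+array (1,3) [(1,'a'), (2,'b'), (3,'c')]
+\end{verbatim}
+or, using the \tr{=:} trick above, as
+\tr{array (1,3) [1 =: 'a', 2 =: 'b', 3 =: 'c']}.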
+
+For \tr{minInt}/\tr{maxInt} and \tr{minChar}/\tr{maxChar},
+use the \tr{Bounded} class methods, \tr{minBound} and \tr{maxBound}.
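+(You may need a type signature to pin down the type; e.g., \tr{maxInt}
+becomes \tr{(maxBound :: Int)}.)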
+
+Replace class \tr{Text} with \tr{Show}; on rare occasions,
+you may need to do something for \tr{Read}, too.
+
+The functions \tr{ord} and \tr{chr} have been replaced by
+the class methods \tr{fromEnum} and \tr{toEnum}, respectively.
+The changes, however, can lead to ambiguous overloading.
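+(A type signature usually cures the ambiguity; e.g., \tr{chr 65}
+becomes \tr{(toEnum 65 :: Char)}.)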
+
+The functions \tr{even} and \tr{odd} used to be methods of class
+@Integral@. They are now ordinary, overloaded functions.
+
+The \tr{print} function now appends a newline to its output. This is
+good, but different.
+
+\tr{readDec} no longer exists; use \tr{(reads::ReadS Int)}, or similar.
+
+If you relied on \tr{take}, \tr{drop}, \tr{splitAt}, etc., being
+overloaded, you will need to switch to \tr{genericTake},
+\tr{genericDrop}, etc., (imported from \tr{List}).
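+(For example, what used to be \tr{take (n::Integer) xs} becomes
+\tr{genericTake n xs}.)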
+
+%************************************************************************
+%* *
+\subsection{The module system}
+%* *
+%************************************************************************
+
+GHC~2.01 is fussier than 0.26 about junk in import lists. This is a
+feature.
+
+\tr{Foo..} (in export lists) must be changed to \tr{module Foo}.
+
+Type synonyms may be imported/exported with or without \tr{(..)} (it
+was required in Haskell~1.2), but you get a warning if you include it.
+
+%************************************************************************
+%* *
+\subsection{Use of Glasgow extensions}
+%* *
+%************************************************************************
+
+Leading-underscore names are {\em gone}. Simon hated them.
+
+To get access to GHC innards, you will need to import system modules
+with names of the form \tr{GHCxxx}. We plan to restrict access to
+such interfaces in the future; and, of course, we reserve the right to
+make horrible changes to \tr{GHC*} modules....
+
+You can't dig around inside the @ST@/@IO@ monads quite so freely.
+
+If you want to provide your own @mainPrimIO@ routine, it must be
+in a module @GHCmain@, not @Main@.
+
+The old \tr{PreludePrimIO} interface is DEAD.
+
+The even-older \tr{PreludeGlaIO} interface is DEADER.
+
+@returnPrimIO@, @thenPrimIO@, and @seqPrimIO@ are deprecated. You
+were warned.
+
+@foldrPrimIO@ has died.
+
+@_FILE@, @fclose@, @fdopen@, @fflush@, @fopen@, @fread@, @freopen@,
+and @fwrite@ are dead.
+
+@appendChanPrimIO@, @appendFilePrimIO@, @getArgsPrimIO@, and
+@readChanPrimIO@ are dead (as previously warned).
+
+The \tr{LibPosix} stuff didn't make it into 1.3 I/O, so it has become
+a ``system library'' (\tr{-syslib posix}). Other than dropping the
+\tr{Lib*} prefix, everything should be the same as in 0.26.
diff --git a/ghc/docs/users_guide/glasgow_exts.lit b/ghc/docs/users_guide/glasgow_exts.lit
index f09235b4c5..b10b282dac 100644
--- a/ghc/docs/users_guide/glasgow_exts.lit
+++ b/ghc/docs/users_guide/glasgow_exts.lit
@@ -28,18 +28,12 @@ to the raw machine types and operations; included in this are
``primitive arrays'' (direct access to Big Wads of Bytes).
Please see \Sectionref{glasgow-unboxed} and following.
-%\item[Synchronising variables---\tr{_IVar}s, \tr{_MVar}s:]
-%These are used when reads and writes need to be coordinated,
-%e.g., if the readers and writers are different concurrent threads.
-%Please see \Sectionref{ivars-mvars}.
-
\item[Calling out to C:] Just what it sounds like. We provide {\em
lots} of rope that you can dangle around your neck.
Please see \Sectionref{glasgow-ccalls}.
-\item[``Monadic I/O:''] This stuff will be coming to you For Real
-with Haskell~1.3, whenever that is.
-Please see \Sectionref{io-1-3} (the ``1.3 I/O'' section).
+\item[Low-level monadic I/O:] Monadic I/O is now standard with Haskell~1.3;
+you can still get access to the system at a lower level (the ``PrimIO'' level).
\item[``HBC-ish'' extensions:] Extensions implemented because people said,
``HBC does Y. Could you teach GHC to do the same?'' Please see
@@ -138,13 +132,13 @@ That is, GHC provides a safe way to pass Haskell pointers to C.
Please see \Sectionref{glasgow-stablePtrs} for more details.
-\item[``Malloc'' pointers:]
-A ``malloc'' pointer is a safe way to pass a C~pointer to Haskell and
+\item[``Foreign objects'':]
+A ``foreign object'' is a safe way to pass a C~pointer to Haskell and
have Haskell do the Right Thing when it no longer references the
object. So, for example, C could pass a large bitmap over to Haskell
and say ``please free this memory when you're done with it.''
-Please see \Sectionref{glasgow-mallocPtrs} for more details.
+Please see \Sectionref{glasgow-foreignObjs} for more details.
\end{description}
See sections~1.4 and~1.6 of the ``state interface document'' for the
@@ -153,6 +147,35 @@ them.
%************************************************************************
+%* *
+\subsection[own-mainPrimIO]{Using your own @mainPrimIO@}
+\index{mainPrimIO, rolling your own}
+%* *
+%************************************************************************
+
+Normally, the GHC runtime system begins things by calling an internal
+function @mainPrimIO :: PrimIO ()@ which, in turn, fires up
+your @Main.main@.
+
+To subvert the above process, you need only provide a
+@mainPrimIO :: PrimIO ()@ of your own (in a module named \tr{GHCmain}).
+
+Here's a little example, stolen from Alastair Reid:
+\begin{verbatim}
+module GHCmain ( mainPrimIO ) where
+
+import PreludeGlaST
+
+mainPrimIO :: PrimIO ()
+mainPrimIO = do
+ sleep 5
+ _ccall_ printf "%d\n" (14::Int)
+
+sleep :: Int -> PrimIO ()
+sleep t = _ccall_ sleep t
+\end{verbatim}
+
+%************************************************************************
%* *
\subsection[glasgow-ccalls]{Calling~C directly from Haskell}
\index{C calls (Glasgow extension)}
@@ -166,25 +189,14 @@ them.
%import PreludePrimIO
%\end{verbatim}
-SINCE VERSION 0.22: ``Literal-literals'', e.g., \tr{``NULL''}, can now
-be any `boxed-primitive' type---they are not automatically taken to be
-\tr{_Addr}s. This is cool, except you may sometimes have to put in
-a type signature to force the desired type.
-
-SINCE VERSION 0.19: \tr{ccall} and \tr{casm} have been renamed to
-\tr{_ccall_} and \tr{_casm_} and \tr{veryDangerousCcall} and
-\tr{veryDangerousCasm} have been removed. It is no longer necessary
-(nor legal!) to unbox/rebox the arguments and results to @_ccall_@.
-GHC does the unboxing/reboxing for you.
-
GOOD ADVICE: Because this stuff is not Entirely Stable as far as names
and things go, you would be well-advised to keep your C-callery
corralled in a few modules, rather than sprinkled all over your code.
It will then be quite easy to update later on.
-WARNING AS OF 0.26: Yes, the \tr{_ccall_} stuff probably {\em will
-change}, to something better, of course! We are only at the
-musing-about-it stage, however.
+WARNING AS OF 2.01: Yes, the \tr{_ccall_} stuff probably {\em will
+change}, to something better, of course! We are still at the
+musing-about-it stage, however...
%************************************************************************
%* *
@@ -196,16 +208,16 @@ The simplest way to use a simple C function
\begin{verbatim}
double fooC( FILE *in, char c, int i, double d, unsigned int u )
\end{verbatim}
-is to provide a Haskell wrapper
+is to provide a Haskell wrapper:
\begin{verbatim}
-fooH :: Char -> Int -> Double -> _Word -> PrimIO Double
+fooH :: Char -> Int -> Double -> Word -> PrimIO Double
fooH c i d w = _ccall_ fooC ``stdin'' c i d w
\end{verbatim}
The function @fooH@ will unbox all of its arguments, call the C
function \tr{fooC} and box the corresponding arguments.
So, if you want to do C-calling, you have to confront the underlying
-Glasgow I/O system. It's just your typical monad whatnot.
+I/O system (at the ``PrimIO'' level).
%The code in \tr{ghc/lib/glaExts/*.lhs} is not too obtuse.
%That code, plus \tr{lib/prelude/Builtin.hs}, give examples
@@ -218,12 +230,12 @@ may be just the ticket (NB: {\em no chance} of such code going through
a native-code generator):
\begin{verbatim}
oldGetEnv name
- = _casm_ ``%r = getenv((char *) %0);'' name `thenPrimIO` \ litstring@(A# str#) ->
- returnPrimIO (
+ = _casm_ ``%r = getenv((char *) %0);'' name >>= \ litstring@(A# str#) ->
+ return (
if (litstring == ``NULL'') then
- Failure (SearchError ("GetEnv:"++name))
+ Left ("Fail:oldGetEnv:"++name)
else
- Str (unpackCString# str#)
+ Right (unpackCString# str#)
)
\end{verbatim}
@@ -246,18 +258,18 @@ directive to provide \tr{.h} files containing function headers.
For example,
\begin{verbatim}
-typedef unsigned long *StgMallocPtr;
+typedef unsigned long *StgForeignObj;
typedef long StgInt;
-void initialiseEFS PROTO( (StgInt size) );
-StgInt terminateEFS ();
-StgMallocPtr emptyEFS();
-StgMallocPtr updateEFS PROTO( (StgMallocPtr a, StgInt i, StgInt x) );
-StgInt lookupEFS PROTO( (StgMallocPtr a, StgInt i) );
+void initialiseEFS (StgInt size);
+StgInt terminateEFS (void);
+StgForeignObj emptyEFS(void);
+StgForeignObj updateEFS (StgForeignObj a, StgInt i, StgInt x);
+StgInt lookupEFS (StgForeignObj a, StgInt i);
\end{verbatim}
You can find appropriate definitions for \tr{StgInt},
-\tr{StgMallocPtr}, etc using \tr{gcc} on your architecture by
+\tr{StgForeignObj}, etc., using \tr{gcc} on your architecture by
consulting \tr{ghc/includes/StgTypes.lh}. The following table
summarises the relationship between Haskell types and C types.
@@ -277,7 +289,7 @@ C type name & Haskell Type \\ \hline
\tr{StgByteArray} & \tr{MutableByteArray#}\\
\tr{StgStablePtr} & \tr{StablePtr#}\\
-\tr{StgMallocPtr} & \tr{MallocPtr#}
+\tr{StgForeignObj} & \tr{ForeignObj#}
\end{tabular}
Note that this approach is only {\em essential\/} for returning
@@ -301,32 +313,31 @@ unevaluated arguments and require the C programmer to force their
evaluation before using them.
\item Boxed values are stored on the Haskell heap and may be moved
-within the heap if a garbage collection occurs --- that is, pointers
+within the heap if a garbage collection occurs---that is, pointers
to boxed objects are not {\em stable\/}.
\end{itemize}
It is possible to subvert the unboxing process by creating a ``stable
-pointer'' to a value and passing the stable pointer instead. (To use
-stable pointers, you must \tr{import PreludeGlaMisc}.) For example, to
+pointer'' to a value and passing the stable pointer instead. For example, to
pass/return an integer lazily to C functions \tr{storeC} and
\tr{fetchC}, one might write:
\begin{verbatim}
storeH :: Int -> PrimIO ()
-storeH x = makeStablePtr x `thenPrimIO` \ stable_x ->
+storeH x = makeStablePtr x >>= \ stable_x ->
_ccall_ storeC stable_x
fetchH :: PrimIO Int
-fetchH x = _ccall_ fetchC `thenPrimIO` \ stable_x ->
- deRefStablePtr stable_x `thenPrimIO` \ x ->
- freeStablePtr stable_x `seqPrimIO`
- returnPrimIO x
+fetchH = _ccall_ fetchC >>= \ stable_x ->
+ deRefStablePtr stable_x >>= \ x ->
+ freeStablePtr stable_x >>
+ return x
\end{verbatim}
The garbage collector will refrain from throwing a stable pointer away
until you explicitly call one of the following from C or Haskell.
\begin{verbatim}
void freeStablePointer( StgStablePtr stablePtrToToss )
-freeStablePtr :: _StablePtr a -> PrimIO ()
+freeStablePtr :: StablePtr a -> PrimIO ()
\end{verbatim}
As with the use of \tr{free} in C programs, GREAT CARE SHOULD BE
@@ -340,9 +351,9 @@ message from the runtime system); too late and you get space leaks.
%call one of the following C functions (according to type of argument).
%
%\begin{verbatim}
-%void performIO ( StgStablePtr stableIndex /* _StablePtr s (PrimIO ()) */ );
-%StgInt enterInt ( StgStablePtr stableIndex /* _StablePtr s Int */ );
-%StgFloat enterFloat ( StgStablePtr stableIndex /* _StablePtr s Float */ );
+%void performIO ( StgStablePtr stableIndex /* StablePtr s (PrimIO ()) */ );
+%StgInt enterInt ( StgStablePtr stableIndex /* StablePtr s Int */ );
+%StgFloat enterFloat ( StgStablePtr stableIndex /* StablePtr s Float */ );
%\end{verbatim}
%
%ToDo ADR: test these functions!
@@ -352,26 +363,25 @@ message from the runtime system); too late and you get space leaks.
%************************************************************************
%* *
-\subsubsection[glasgow-mallocPtrs]{Pointing outside the Haskell heap}
-\index{malloc pointers (Glasgow extension)}
+\subsubsection[glasgow-foreignObjs]{Pointing outside the Haskell heap}
+\index{foreign objects (Glasgow extension)}
%* *
%************************************************************************
There are two types that \tr{ghc} programs can use to reference
-(heap-allocated) objects outside the Haskell world: \tr{_Addr} and
-\tr{_MallocPtr}. (You must import \tr{PreludeGlaMisc} to use
-\tr{_MallocPtr}.)
+(heap-allocated) objects outside the Haskell world: \tr{Addr} and
+\tr{ForeignObj}.
-If you use \tr{_Addr}, it is up to you to the programmer to arrange
+If you use \tr{Addr}, it is up to you, the programmer, to arrange
allocation and deallocation of the objects.
-If you use \tr{_MallocPtr}, \tr{ghc}'s garbage collector will
+If you use \tr{ForeignObj}, \tr{ghc}'s garbage collector will
call the user-supplied C function
\begin{verbatim}
-void FreeMallocPtr( StgMallocPtr garbageMallocPtr )
+void freeForeignObj( StgForeignObj garbageMallocPtr )
\end{verbatim}
when the Haskell world can no longer access the object. Since
-\tr{_MallocPtr}s only get released when a garbage collection occurs,
+\tr{ForeignObj}s only get released when a garbage collection occurs,
we provide ways of triggering a garbage collection from within C and
from within Haskell.
\begin{verbatim}
@@ -403,14 +413,13 @@ atan2d :: Double -> Double -> Double
atan2d y x = unsafePerformPrimIO (_ccall_ atan2d y x)
sincosd :: Double -> (Double, Double)
-sincosd x = unsafePerformPrimIO (
- newDoubleArray (0, 1) `thenPrimIO` \ da ->
+sincosd x = unsafePerformPrimIO $
+ newDoubleArray (0, 1) >>= \ da ->
_casm_ ``sincosd( %0, &((double *)%1[0]), &((double *)%1[1]) );'' x da
- `seqPrimIO`
- readDoubleArray da 0 `thenPrimIO` \ s ->
- readDoubleArray da 1 `thenPrimIO` \ c ->
- returnPrimIO (s, c)
- )
+ >>
+ readDoubleArray da 0 >>= \ s ->
+ readDoubleArray da 1 >>= \ c ->
+ return (s, c)
\end{verbatim}
\item Calling a set of functions which have side-effects but which can
@@ -426,34 +435,34 @@ lookup :: EFS a -> Int -> a
empty = unsafePerformPrimIO (_ccall_ emptyEFS)
-update a i x = unsafePerformPrimIO (
- makeStablePtr x `thenPrimIO` \ stable_x ->
+update a i x = unsafePerformPrimIO $
+ makeStablePtr x >>= \ stable_x ->
_ccall_ updateEFS a i stable_x
- )
-lookup a i = unsafePerformPrimIO (
- _ccall_ lookupEFS a i `thenPrimIO` \ stable_x ->
+lookup a i = unsafePerformPrimIO $
+ _ccall_ lookupEFS a i >>= \ stable_x ->
deRefStablePtr stable_x
- )
\end{verbatim}
-You will almost always want to use \tr{_MallocPtr}s with this.
+You will almost always want to use \tr{ForeignObj}s with this.
\item Calling a side-effecting function even though the results will
be unpredictable. For example the \tr{trace} function is defined by:
\begin{verbatim}
trace :: String -> a -> a
-trace string expr = unsafePerformPrimIO (
- appendChan# ``stderr'' "Trace On:\n" `seqPrimIO`
- appendChan# ``stderr'' string `seqPrimIO`
- appendChan# ``stderr'' "\nTrace Off.\n" `seqPrimIO`
- returnPrimIO expr )
+trace string expr
+ = unsafePerformPrimIO (
+ ((_ccall_ PreTraceHook sTDERR{-msg-}):: PrimIO ()) >>
+ fputs sTDERR string >>
+ ((_ccall_ PostTraceHook sTDERR{-msg-}):: PrimIO ()) >>
+ returnPrimIO expr )
+ where
+ sTDERR = (``stderr'' :: Addr)
\end{verbatim}
(This kind of use is not highly recommended --- it is only really
useful in debugging code.)
-
\end{itemize}
%************************************************************************
@@ -501,7 +510,7 @@ the intermediate C (\tr{.hc} file).
The compiler uses two non-standard type-classes when
type-checking the arguments and results of \tr{_ccall_}: the arguments
(respectively result) of \tr{_ccall_} must be instances of the class
-\tr{_CCallable} (respectively \tr{_CReturnable}. (Neither class
+\tr{CCallable} (respectively \tr{CReturnable}). (Neither class
defines any methods --- their only function is to keep the
type-checker happy.)
@@ -527,22 +536,22 @@ Type &CCallable&CReturnable & Which is probably... \\ \hline
%------ ---------- ------------ -------------
\tr{Char} & Yes & Yes & \tr{unsigned char} \\
\tr{Int} & Yes & Yes & \tr{long int} \\
-\tr{_Word} & Yes & Yes & \tr{unsigned long int} \\
-\tr{_Addr} & Yes & Yes & \tr{char *} \\
+\tr{Word} & Yes & Yes & \tr{unsigned long int} \\
+\tr{Addr} & Yes & Yes & \tr{char *} \\
\tr{Float} & Yes & Yes & \tr{float} \\
\tr{Double} & Yes & Yes & \tr{double} \\
\tr{()} & No & Yes & \tr{void} \\
\tr{[Char]} & Yes & No & \tr{char *} (null-terminated) \\
\tr{Array} & Yes & No & \tr{unsigned long *}\\
-\tr{_ByteArray} & Yes & No & \tr{unsigned long *}\\
-\tr{_MutableArray} & Yes & No & \tr{unsigned long *}\\
-\tr{_MutableByteArray} & Yes & No & \tr{unsigned long *}\\
-
-\tr{_State} & Yes & Yes & nothing!\\
-
-\tr{_StablePtr} & Yes & Yes & \tr{unsigned long *}\\
-\tr{_MallocPtr} & Yes & Yes & see later\\
+\tr{ByteArray} & Yes & No & \tr{unsigned long *}\\
+\tr{MutableArray} & Yes & No & \tr{unsigned long *}\\
+\tr{MutableByteArray} & Yes & No & \tr{unsigned long *}\\
+
+\tr{State} & Yes & Yes & nothing!\\
+
+\tr{StablePtr} & Yes & Yes & \tr{unsigned long *}\\
+\tr{ForeignObj} & Yes & Yes & see later\\
\end{tabular}
The brave and careful programmer can add their own instances of these
@@ -550,7 +559,7 @@ classes for the following types:
\begin{itemize}
\item
A {\em boxed-primitive} type may be made an instance of both
-\tr{_CCallable} and \tr{_CReturnable}.
+\tr{CCallable} and \tr{CReturnable}.
A boxed primitive type is any data type with a
single unary constructor with a single primitive argument. For
@@ -560,27 +569,27 @@ example, the following are all boxed primitive types:
Int
Double
data XDisplay = XDisplay Addr#
-data EFS a = EFS# MallocPtr#
+data EFS a = EFS# ForeignObj#
\end{verbatim}
\begin{verbatim}
-instance _CCallable (EFS a)
-instance _CReturnable (EFS a)
+instance CCallable (EFS a)
+instance CReturnable (EFS a)
\end{verbatim}
\item Any datatype with a single nullary constructor may be made an
-instance of \tr{_CReturnable}. For example:
+instance of \tr{CReturnable}. For example:
\begin{verbatim}
data MyVoid = MyVoid
-instance _CReturnable MyVoid
+instance CReturnable MyVoid
\end{verbatim}
-\item As at version 0.26, \tr{String} (i.e., \tr{[Char]}) is still
-not a \tr{_CReturnable} type.
+\item As at version 2.01, \tr{String} (i.e., \tr{[Char]}) is still
+not a \tr{CReturnable} type.
-Also, the now-builtin type \tr{_PackedString} is neither
-\tr{_CCallable} nor \tr{_CReturnable}. (But there are functions in
+Also, the now-builtin type \tr{PackedString} is neither
+\tr{CCallable} nor \tr{CReturnable}. (But there are functions in
the PackedString interface to let you get at the necessary bits...)
\end{itemize}
@@ -606,6 +615,47 @@ hairy with a capital H!
%************************************************************************
%************************************************************************
+%* *
+\subsubsection[glasgow-prim-interface]{Access to the \tr{PrimIO} monad}
+\index{PrimIO monad (Glasgow extension)}
+\index{I/O, primitive (Glasgow extension)}
+%* *
+%************************************************************************
+
+The \tr{IO} monad (new in Haskell~1.3) catches errors and passes them
+along. It is built on top of the \tr{ST} state-transformer monad.
+
+A related (and inter-operable-with) monad is the \tr{PrimIO} monad
+(NB: the level at which @_ccall_@s work...), where you handle errors
+yourself.
+
+Should you wish to use the \tr{PrimIO} monad directly, you can import
+\tr{PreludeGlaST}. It makes available the usual monadic stuff (@>>=@,
+@>>@, @return@, etc.), as well as these functions:
+\begin{verbatim}
+-- for backward compatibility:
+returnPrimIO :: a -> PrimIO a
+thenPrimIO :: PrimIO a -> (a -> PrimIO b) -> PrimIO b
+seqPrimIO :: PrimIO a -> PrimIO b -> PrimIO b
+
+-- still useful:
+fixPrimIO :: (a -> PrimIO a) -> PrimIO a
+forkPrimIO :: PrimIO a -> PrimIO a
+listPrimIO :: [PrimIO a] -> PrimIO [a]
+mapAndUnzipPrimIO :: (a -> PrimIO (b,c)) -> [a] -> PrimIO ([b],[c])
+mapPrimIO :: (a -> PrimIO b) -> [a] -> PrimIO [b]
+
+unsafePerformPrimIO :: PrimIO a -> a
+unsafeInterleavePrimIO :: PrimIO a -> PrimIO a
+ -- and they are not called "unsafe" for nothing!
+
+-- to convert back and forth between IO and PrimIO
+ioToPrimIO :: IO a -> PrimIO a
+primIOToIO :: PrimIO a -> IO a
+\end{verbatim}
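+
+For example (an illustrative fragment only; \tr{getRandomish} is not a
+real library function), a \tr{PrimIO} action can be run from your
+ordinary \tr{IO}-monad \tr{main} by converting it:
+\begin{verbatim}
+import PreludeGlaST
+
+getRandomish :: PrimIO Int
+getRandomish = _ccall_ rand     -- C's rand(), at the PrimIO level
+
+main :: IO ()
+main = primIOToIO getRandomish >>= \ n ->
+       print n
+\end{verbatim}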
+
+
+%************************************************************************
%* *
\subsection[glasgow-hbc-exts]{``HBC-ish'' extensions implemented by GHC}
\index{HBC-like Glasgow extensions}
@@ -638,39 +688,39 @@ As Lennart says, ``This is a dubious feature and should not be used
carelessly.''
See also: \tr{SPECIALIZE instance} pragmas, in \Sectionref{faster}.
-
+%
%-------------------------------------------------------------------
-\item[Signal-handling I/O request:]
-\index{signal handling (extension)}
-\index{SigAction I/O request}
-The Haskell-1.2 I/O request \tr{SigAction n act} installs a signal handler for signal
-\tr{n :: Int}. The number is the usual UNIX signal number. The action
-is of this type:
-\begin{verbatim}
-data SigAct
- = SAIgnore
- | SADefault
- | SACatch Dialogue
-\end{verbatim}
-
-The corresponding continuation-style I/O function is the unsurprising:
-\begin{verbatim}
-sigAction :: Int -> SigAct -> FailCont -> SuccCont -> Dialogue
-\end{verbatim}
-
-When a signal handler is installed with \tr{SACatch}, receipt of the
-signal causes the current top-level computation to be abandoned, and
-the specified dialogue to be executed instead. The abandoned
-computation may leave some partially evaluated expressions in a
-non-resumable state. If you believe that your top-level computation
-and your signal handling dialogue may share subexpressions, you should
-execute your program with the \tr{-N} RTS option, to prevent
-black-holing.
-
-The \tr{-N} option is not available with concurrent/parallel programs,
-so great care should be taken to avoid shared subexpressions between
-the top-level computation and any signal handlers when using threads.
-
+% \item[Signal-handling I/O request:]
+% \index{signal handling (extension)}
+% \index{SigAction I/O request}
+% The Haskell-1.2 I/O request \tr{SigAction n act} installs a signal handler for signal
+% \tr{n :: Int}. The number is the usual UNIX signal number. The action
+% is of this type:
+% \begin{verbatim}
+% data SigAct
+% = SAIgnore
+% | SADefault
+% | SACatch Dialogue
+% \end{verbatim}
+%
+% The corresponding continuation-style I/O function is the unsurprising:
+% \begin{verbatim}
+% sigAction :: Int -> SigAct -> FailCont -> SuccCont -> Dialogue
+% \end{verbatim}
+%
+% When a signal handler is installed with \tr{SACatch}, receipt of the
+% signal causes the current top-level computation to be abandoned, and
+% the specified dialogue to be executed instead. The abandoned
+% computation may leave some partially evaluated expressions in a
+% non-resumable state. If you believe that your top-level computation
+% and your signal handling dialogue may share subexpressions, you should
+% execute your program with the \tr{-N} RTS option, to prevent
+% black-holing.
+%
+% The \tr{-N} option is not available with concurrent/parallel programs,
+% so great care should be taken to avoid shared subexpressions between
+% the top-level computation and any signal handlers when using threads.
+%
%-------------------------------------------------------------------
%\item[Simple time-out mechanism, in ``monadic I/O'':]
%\index{time-outs (extension)}
diff --git a/ghc/docs/users_guide/gone_wrong.lit b/ghc/docs/users_guide/gone_wrong.lit
index 960d3b7f36..7194106c49 100644
--- a/ghc/docs/users_guide/gone_wrong.lit
+++ b/ghc/docs/users_guide/gone_wrong.lit
@@ -49,40 +49,40 @@ how you get along.
This is a bug just as surely as a ``panic.'' Please report it.
%-------------------------------------------------------------------
-\item[``Some confusion about a value specialised to a type...'' Huh???]
-(A deeply obscure and unfriendly error message.)
-
-This message crops up when the typechecker sees a reference in an
-interface pragma to a specialisation of an overloaded value
-(function); for example, \tr{elem} specialised for type \tr{[Char]}
-(\tr{String}). The problem is: it doesn't {\em know} that such a
-specialisation exists!
-
-The cause of this problem is (please report any other cases...): The
-compiler has imported pragmatic info for the value in question from
-more than one interface, and the multiple interfaces did not agree
-{\em exactly} about the value's pragmatic info. Since the compiler
-doesn't know whom to believe, it believes none of them.
-
-The cure is to re-compile the modules that {\em re-export} the
-offending value (after possibly re-compiling its defining module).
-Now the pragmatic info should be exactly the same in every case, and
-things should be fine.
+% \item[``Some confusion about a value specialised to a type...'' Huh???]
+% (A deeply obscure and unfriendly error message.)
+%
+% This message crops up when the typechecker sees a reference in an
+% interface pragma to a specialisation of an overloaded value
+% (function); for example, \tr{elem} specialised for type \tr{[Char]}
+% (\tr{String}). The problem is: it doesn't {\em know} that such a
+% specialisation exists!
+%
+% The cause of this problem is (please report any other cases...): The
+% compiler has imported pragmatic info for the value in question from
+% more than one interface, and the multiple interfaces did not agree
+% {\em exactly} about the value's pragmatic info. Since the compiler
+% doesn't know whom to believe, it believes none of them.
+%
+% The cure is to re-compile the modules that {\em re-export} the
+% offending value (after possibly re-compiling its defining module).
+% Now the pragmatic info should be exactly the same in every case, and
+% things should be fine.
%-------------------------------------------------------------------
-\item[``Can't see the data constructors for a ccall/casm'' Huh?]
-GHC ``unboxes'' C-call arguments and ``reboxes'' C-call results for you.
-To do this, it {\\em has} to be able to see the types fully;
-abstract types won't do!
-
-Thus, if you say \tr{data Foo = Foo Int#}
-(a cool ``boxed primitive'' type), but then make it abstract
-(only \tr{data Foo} appears in the interface), then GHC can't figure
-out what to do with \tr{Foo} arguments/results to C-calls.
-
-Solutions: either make the type unabstract, or compile with \tr{-O}.
-With the latter, the constructor info will be passed along in
-the interface pragmas.
+% \item[``Can't see the data constructors for a ccall/casm'' Huh?]
+% GHC ``unboxes'' C-call arguments and ``reboxes'' C-call results for you.
+% To do this, it {\\em has} to be able to see the types fully;
+% abstract types won't do!
+%
+% Thus, if you say \tr{data Foo = Foo Int#}
+% (a cool ``boxed primitive'' type), but then make it abstract
+% (only \tr{data Foo} appears in the interface), then GHC can't figure
+% out what to do with \tr{Foo} arguments/results to C-calls.
+%
+% Solutions: either make the type unabstract, or compile with \tr{-O}.
+% With the latter, the constructor info will be passed along in
+% the interface pragmas.
%-------------------------------------------------------------------
\item[``This is a terrible error message.'']
@@ -222,7 +222,8 @@ extra checking) to your compilation options.
So, before you report a bug because of a core dump, you should probably:
\begin{verbatim}
% rm *.o # scrub your object files
-% make my_prog # re-make your program; use -hi-diffs to highlight changes
+% make my_prog # re-make your program; use -hi-diffs to highlight changes;
+ # as mentioned above, use -dcore-lint to be more paranoid
% ./my_prog ... # retry...
\end{verbatim}
@@ -259,7 +260,7 @@ arithmetic uncheckiness might cause an exception, too...
Glasgow Haskell is a changing system so there are sure to be bugs in
it. Please report them to
-\tr{glasgow-haskell-bugs@dcs.glasgow.ac.uk}! (However, please check
+\tr{glasgow-haskell-bugs@dcs.gla.ac.uk}! (However, please check
the earlier part of this section to be sure it's not a known
not-really-a problem.)
@@ -268,7 +269,7 @@ Don't omit them because ``Oh, they won't be interested...''
\begin{enumerate}
\item
What kind of machine are you running on, and exactly what version of the
-operating system are you using? (\tr{cat /etc/motd} often shows the desired
+operating system are you using? (\tr{uname -a} or \tr{cat /etc/motd} will show the desired
information.)
\item
diff --git a/ghc/docs/users_guide/how_to_run.lit b/ghc/docs/users_guide/how_to_run.lit
index 2403968c72..7c8ee0c02b 100644
--- a/ghc/docs/users_guide/how_to_run.lit
+++ b/ghc/docs/users_guide/how_to_run.lit
@@ -106,8 +106,7 @@ compilation system & ``start here''& ``stop after''& output file \\ \hline
literate pre-processor & .lhs & - & - \\
C pre-processor (opt.) & - & - & - \\
-Haskell parser & .hs & - & - \\
-Haskell compiler & - & -C, -S & .hc, .s \\
+Haskell compiler & .hs & -C, -S & .hc, .s \\
C compiler (opt.) & .hc or .c & -S & .s \\
assembler & .s & -c & .o \\
linker & other & - & a.out \\
@@ -183,7 +182,7 @@ The avoided ``dangerous'' optimisations are those that can make
runtime or space {\em worse} if you're unlucky. They are
normally turned on or off individually.
-As of version~0.26, \tr{-O2} is {\em unlikely} to produce
+At the moment, \tr{-O2} is {\em unlikely} to produce
better code than \tr{-O}.
% \item[\tr{-O0}:]
@@ -266,16 +265,13 @@ So, for example, you can say
strictness analyser.
The options you are most likely to want to turn off are:
-\tr{-fno-update-analysis}\index{-fno-update-analysis option} [because
-it is sometimes slow],
\tr{-fno-strictness}\index{-fno-strictness option} (strictness
analyser [because it is sometimes slow]),
\tr{-fno-specialise}\index{-fno-specialise option} (automatic
specialisation of overloaded functions [because it makes your code
bigger]) [US spelling also accepted],
and
-\tr{-fno-foldr-build}\index{-fno-foldr-build option} [because no-one
-knows what Andy Gill made it do].
+\tr{-fno-foldr-build}\index{-fno-foldr-build option}.
Should you wish to turn individual flags {\em on}, you are advised to
use the \tr{-Ofile} option, described above. Because the order in
@@ -286,11 +282,11 @@ Here are some ``dangerous'' optimisations you {\em might} want to try:
\begin{description}
%------------------------------------------------------------------
\item[\tr{-funfolding-creation-threshold<n>}:]
-(Default: 30) By raising or lowering this number, you can raise or lower the
-amount of pragmatic junk that gets spewed into interface files.
-(An unfolding has a ``size'' that reflects the cost in terms of ``code
-bloat'' of expanding that unfolding in another module. A bigger
-Core expression would be assigned a bigger cost.)
+(Default: 30) By raising or lowering this number, you can raise or
+lower the amount of pragmatic junk that gets spewed into interface
+files. (An unfolding has a ``size'' that reflects the cost in terms
+of ``code bloat'' of expanding that unfolding in another module. A
+bigger Core expression would be assigned a bigger cost.)
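+(For example, \tr{ghc -c -O -funfolding-creation-threshold50 Foo.hs}
+would allow somewhat larger unfoldings into the interface file.)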
\item[\tr{-funfolding-use-threshold<n>}:]
(Default: 3) By raising or lowering this number, you can make the
@@ -316,13 +312,13 @@ with the bigger-but-better one? Yes, if the bigger one's ``size'' is
still under the ``override threshold.'' You can use this flag to
adjust this threshold (why, I'm not sure).
-\item[\tr{-fliberated-case-threshold<n>}:]
-(Default: 12) [Vastly obscure: NOT IMPLEMENTED YET]
-``Case liberation'' lifts evaluation out of recursive functions; it
-does this by duplicating code. Done without constraint, you can get
-serious code bloat; so we only do it if the ``size'' of the duplicated
-code is smaller than some ``threshold.'' This flag can fiddle that
-threshold.
+% \item[\tr{-fliberated-case-threshold<n>}:]
+% (Default: 12) [Vastly obscure: NOT IMPLEMENTED YET]
+% ``Case liberation'' lifts evaluation out of recursive functions; it
+% does this by duplicating code. Done without constraint, you can get
+% serious code bloat; so we only do it if the ``size'' of the duplicated
+% code is smaller than some ``threshold.'' This flag can fiddle that
+% threshold.
\item[\tr{-fsemi-tagging}:]
This option (which {\em does not work} with the native-code generator)
@@ -331,7 +327,7 @@ values. You win if you have lots of such values during a run of your
program, you lose otherwise. (And you pay in extra code space.)
We have not played with \tr{-fsemi-tagging} enough to recommend it.
-(For all we know, it doesn't even work in 0.26. Sigh.)
+(For all we know, it doesn't even work anymore... Sigh.)
\end{description}
%----------------------------------------------------------------------
@@ -365,23 +361,29 @@ We have not played with \tr{-fsemi-tagging} enough to recommend it.
Some flags only make sense for particular target platforms.
\begin{description}
+\item[\tr{-mv8}:]
+(SPARC machines)\index{-mv8 option (SPARC only)}
+Means to pass the like-named option to GCC; it says to use the
+Version 8 SPARC instructions, notably integer multiply and divide.
+The similar \tr{-m*} GCC options for SPARC also work, actually.
+
\item[\tr{-mlong-calls}:]
(HPPA machines)\index{-mlong-calls option (HPPA only)}
Means to pass the like-named option to GCC. Required for Very Big
modules, maybe. (Probably means you're in trouble...)
-\item[\tr{-monly-[432]-regs}:]
+\item[\tr{-monly-[32]-regs}:]
(iX86 machines)\index{-monly-N-regs option (iX86 only)}
-GHC tries to ``steal'' five registers from GCC, for performance
+GHC tries to ``steal'' four registers from GCC, for performance
reasons; it almost always works. However, when GCC is compiling some
-modules with five stolen registers, it will crash, probably saying:
+modules with four stolen registers, it will crash, probably saying:
\begin{verbatim}
Foo.hc:533: fixed or forbidden register was spilled.
This may be due to a compiler bug or to impossible asm
statements or clauses.
\end{verbatim}
-Just give some registers back with \tr{-monly-N-regs}. Try `4' first,
-then `3', then `2'. If `2' doesn't work, please report the bug to us.
+Just give some registers back with \tr{-monly-N-regs}. Try `3' first,
+then `2'. If `2' doesn't work, please report the bug to us.
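+(That is: add, e.g., \tr{-monly-3-regs} to the command line used to
+compile the offending module.)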
\end{description}
%----------------------------------------------------------------------
@@ -389,7 +391,7 @@ then `3', then `2'. If `2' doesn't work, please report the bug to us.
\index{optimisation by GCC}
\index{GCC optimisation}
-The C~compiler, normally GCC, is run with \tr{-O} turned on. (It has
+The C~compiler (GCC) is run with \tr{-O} turned on. (It has
to be, actually.)
If you want to run GCC with \tr{-O2}---which may be worth a few
@@ -440,7 +442,7 @@ One file is usually an {\em interface file}. If compiling
\tr{bar/Foo.hs}, the interface file would normally be \tr{bar/Foo.hi}.
The interface output may be directed to another file
\tr{bar2/Wurble.iface} with the option
-\tr{-ohi bar2/Wurble.iface}\index{-ohi <file> option}.
+\tr{-ohi bar2/Wurble.iface}\index{-ohi <file> option} (not recommended).
To avoid generating an interface file at all, use a \tr{-nohi}
option.\index{-nohi option}
@@ -451,6 +453,11 @@ the new one is byte-for-byte the same as the old one; this is friendly to
be informed. The \tr{-hi-diffs}\index{-hi-diffs option} option will
make \tr{ghc} run \tr{diff} on the old and new \tr{.hi} files.
+The \tr{.hi} files from GHC 2.xx contain ``usage'' information which
+changes often and uninterestingly. If you really want to see these
+changes reported, you need to use the
+\tr{-hi-diffs-with-usages}\index{-hi-diffs-with-usages option} option.
+
GHC's non-interface output normally goes into a \tr{.hc}, \tr{.o},
etc., file, depending on the last-run compilation phase. The option
\tr{-o foo}\index{-o option} re-directs the output of that last-run
@@ -495,7 +502,12 @@ that your read.
The \tr{-hisuf}/\tr{-osuf} game is useful if you want to compile a
program with both GHC and HBC (say) in the same directory. Let HBC
use the standard \tr{.hi}/\tr{.o} suffixes; add
-\tr{-hisuf _g.hi -osuf _g.o} to your \tr{make} rule for GHC compiling...
+\tr{-hisuf g_hi -osuf g_o} to your \tr{make} rule for GHC compiling...
+
+NB: {\em A change from 0.26 and before:} Before, you might have said
+\tr{-hisuf _g.hi -osuf _g.o}; now, the \tr{.} is assumed and you
+specify what comes {\em after} it. (This is a more portable solution
+for the long term.)
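+
+(With the flags above, GHC would then produce \tr{Foo.g_hi} and
+\tr{Foo.g_o} for a module \tr{Foo}, leaving plain \tr{Foo.hi} and
+\tr{Foo.o} for HBC.)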
% THIS SHOULD HAPPEN AUTOMAGICALLY:
% If you want to change the suffix looked for on system-supplied
@@ -547,11 +559,9 @@ colon-separated list of \tr{dirs} to the ``import directories'' list.
A plain \tr{-i} resets the ``import directories'' list back to nothing.
-GHC normally imports \tr{PreludeCore.hi} and \tr{Prelude.hi} files for
-you. If you'd rather it didn't, then give it a
-\tr{-fno-implicit-prelude} option\index{-fno-implicit-prelude option}.
-(Sadly, it still has to {\em find} a \tr{PreludeNull_.hi} file; it
-just won't feed it into the compiler proper.) You are unlikely to get
+GHC normally imports \tr{Prelude.hi} files for you. If you'd rather
+it didn't, then give it a \tr{-fno-implicit-prelude}
+option\index{-fno-implicit-prelude option}. You are unlikely to get
very far without a Prelude, but, hey, it's a free country.
If you are using a system-supplied non-Prelude library (e.g., the HBC
@@ -610,7 +620,7 @@ Undefine macro \tr{<foo>} in the usual way.
\item[\tr{-I<dir>}:]
\index{-I<dir> option}
Specify a directory in which to look for \tr{#include} files, in
-the usual UNIX/C way.
+the usual C way.
\end{description}
The \tr{ghc} driver pre-defines several macros:
@@ -619,18 +629,18 @@ The \tr{ghc} driver pre-defines several macros:
\index{__HASKELL1__ macro}
If defined to $n$, that means GHC supports the
Haskell language defined in the Haskell report version $1.n$.
-Currently 2.
+Currently 3.
NB: This macro is set both when pre-processing Haskell source and
when pre-processing generated C (\tr{.hc}) files.
-If you give the \tr{-fhaskell-1.3} flag\index{-fhaskell-1.3 option},
-then \tr{__HASKELL1__} is set to 3. Obviously.
+% If you give the \tr{-fhaskell-1.3} flag\index{-fhaskell-1.3 option},
+% then \tr{__HASKELL1__} is set to 3. Obviously.
\item[\tr{__GLASGOW_HASKELL__}:]
\index{__GLASGOW_HASKELL__ macro}
For version $n$ of the GHC system, this will be \tr{#define}d to
-$100 \times n$. So, for version~0.26, it is 26.
+$100 \times n$. So, for version~2.01, it is 201.
This macro is {\em only} set when pre-processing Haskell source.
({\em Not} when pre-processing generated C.)
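+
+So, for example (an illustrative fragment), you might guard GHC-specific
+code with:
+\begin{verbatim}
+#ifdef __GLASGOW_HASKELL__
+-- GHC-specific code goes here
+#else
+-- code for other Haskell systems
+#endif
+\end{verbatim}
+(Remember: the C pre-processor is only run if you give the \tr{-cpp}
+option.)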
@@ -639,8 +649,7 @@ With any luck, \tr{__GLASGOW_HASKELL__} will be undefined in all other
implementations that support C-style pre-processing.
(For reference: the comparable symbols for other systems are:
-\tr{__YALE_HASKELL__} for Yale Haskell, \tr{__HBC__} for Chalmers
-HBC, and \tr{__GOFER__} for Gofer [I think].)
+\tr{__HUGS__} for Hugs and \tr{__HBC__} for Chalmers.)
\item[\tr{__CONCURRENT_HASKELL__}:]
\index{__CONCURRENT_HASKELL__ macro}
@@ -738,21 +747,23 @@ libraries automatically; these are:
-lHSrts,-lHSclib & basic runtime libraries \\
-lHS & standard Prelude library \\
+-lHS\_cbits & C support code for standard Prelude library \\
-lgmp & GNU multi-precision library (for Integers)\\
\end{tabular}
\index{-lHS library}
+\index{-lHS_cbits library}
\index{-lHSrts library}
\index{-lgmp library}
\item[\tr{-syslib <name>}:]
\index{-syslib <name> option}
-If you are using an optional GHC-supplied library (e.g., the HBC
+If you are using a Haskell ``system library'' (e.g., the HBC
library), just use the \tr{-syslib hbc} option, and the correct code
should be linked in.
-Please see \sectionref{syslibs} for information about optional
-GHC-supplied libraries.
+Please see \sectionref{syslibs} for information about
+``system libraries.''
\item[\tr{-L<dir>}:]
\index{-L<dir> option}
@@ -828,16 +839,16 @@ Report garbage-collection statistics. It will create a
Alternatively, if you'd rather the GC stats went straight to standard
error, you can ``cheat'' by using, instead: \tr{-optCrts-Sstderr}.
-
-\item[\tr{-Rhbc}:]
-\index{-Rhbc option}
-Tell the compiler it has an HBC-style RTS; i.e., it was compiled with
-HBC. Not used in Real Life.
-
-\item[\tr{-Rghc}:]
-\index{-Rghc option}
-Tell the compiler it has a GHC-style RTS; i.e., it was compiled with
-GHC. Not used in Real Life.
+%
+%\item[\tr{-Rhbc}:]
+%\index{-Rhbc option}
+%Tell the compiler it has an HBC-style RTS; i.e., it was compiled with
+%HBC. Not used in Real Life.
+%
+%\item[\tr{-Rghc}:]
+%\index{-Rghc option}
+%Tell the compiler it has a GHC-style RTS; i.e., it was compiled with
+%GHC. Not used in Real Life.
\end{description}
For all \tr{<size>}s: If the last character of \tr{size} is a K,
@@ -873,31 +884,32 @@ programming, please see \Sectionref{concurrent-and-parallel}.
%************************************************************************
%* *
-\subsection[options-experimental]{For experimental purposes}
-\index{experimental options}
+%\subsection[options-experimental]{For experimental purposes}
+%\index{experimental options}
%* *
%************************************************************************
-From time to time, we provide GHC options for ``experimenting.'' Easy
-come, easy go. In version~0.26, the ``experimental'' options are:
-\begin{description}
-\item[\tr{-firrefutable-tuples} option:]
-\index{-firrefutable-tuples option (experimental)}
-Pretend that every tuple pattern is irrefutable; i.e., has a
-``twiddle'' (\tr{~}) in front of it.
-
-Some parts of the GHC system {\em depend} on strictness properties which
-\tr{-firrefutable-tuples} may undo, notably the low-level state-transformer
-stuff, which includes I/O (!). You're on your own...
-
-\item[\tr{-fall-strict} option:]
-\index{-fall-strict option (experimental)}
-(DOESN'T REALLY WORK, I THINK) Changes the strictness analyser so
-that, when it asks the question ``Is this function argument certain to
-be evaluated?'', the answer is always ``yes''.
-
-Compilation is changed in no other way.
-\end{description}
+%From time to time, we provide GHC options for ``experimenting.'' Easy
+%come, easy go. In version~0.26, the ``experimental'' options are:
+%\begin{description}
+%\item[\tr{-firrefutable-tuples} option:]
+%\index{-firrefutable-tuples option (experimental)}
+%Pretend that every tuple pattern is irrefutable; i.e., has a
+%``twiddle'' (\tr{~}) in front of it.
+%
+%Some parts of the GHC system {\em depend} on strictness properties which
+%\tr{-firrefutable-tuples} may undo, notably the low-level state-transformer
+%stuff, which includes I/O (!). You're on your own...
+%
+%\item[\tr{-fall-strict} option:]
+%\index{-fall-strict option (experimental)}
+% (DOESN'T REALLY WORK, I THINK) Changes the strictness analyser so
+%that, when it asks the question ``Is this function argument certain to
+%be evaluated?'', the answer is always ``yes''.
+%
+%Compilation is changed in no other way.
+%\end{description}
+
% -firrefutable-everything
% -fall-demanded
@@ -919,7 +931,7 @@ HACKER TERRITORY. HACKER TERRITORY.
You may specify that a different program
be used for one of the phases of the compilation system, in place of
whatever the driver \tr{ghc} has wired into it. For example, you
-might want to test a replacement parser. The
+might want to try a different assembler. The
\tr{-pgm<phase-code><program-name>}\index{-pgm<phase><stuff> option} option to
\tr{ghc} will cause it to use \pl{<program-name>} for phase
\pl{<phase-code>}, where the codes to indicate the phases are:
@@ -928,17 +940,12 @@ might want to test a replacement parser. The
code & phase \\ \hline
L & literate pre-processor \\
P & C pre-processor (if -cpp only) \\
-p & parser \\
C & Haskell compiler \\
-cO & C compiler for `optimised' (normal) compiling \\
-c & C compiler for `unregisterised' compiling \\
+c & C compiler\\
a & assembler \\
l & linker \\
\end{tabular}
-If you use the ambiguous \tr{-pgmcOle}, it will take it to mean
-``use program \tr{le} for optimised C compiling.''
-
%----------------------------------------------------------------------
\subsubsection[forcing-options-through]{Forcing options to a particular phase.}
\index{forcing GHC-phase options}
@@ -962,8 +969,8 @@ So, for example: when I want to use my normal driver but with my
profiled compiler binary, I use this script:
\begin{verbatim}
#! /bin/sh
-exec /local/grasp_tmp3/partain/ghc-BUILDS/working-alpha/ghc/driver/ghc \
- -pgmC/local/grasp_tmp3/partain/ghc-BUILDS/working-hsc-prof/hsc \
+exec /local/grasp_tmp3/simonpj/ghc-BUILDS/working-alpha/ghc/driver/ghc \
+ -pgmC/local/grasp_tmp3/simonpj/ghc-BUILDS/working-hsc-prof/hsc \
-optCrts-i0.5 \
-optCrts-PT \
"$@"
@@ -987,10 +994,11 @@ example: \tr{ghc -noC -ddump-simpl Foo.hs}
conjunction with \tr{-noC}, which turns off interface generation;
thus: \tr{-noC -hi}.
-\item[\tr{-ddump-parser}:]
-\index{-ddump-parser option}
-This debugging option shows the exact prefix-form Haskell that is fed
-into the Haskell compiler proper.
+\item[\tr{-dshow-passes}:]
+\index{-dshow-passes option}
+Prints a message to stderr as each pass starts. Gives a warm but
+undoubtedly misleading feeling that GHC is telling you what's
+happening.
\item[\tr{-ddump-<pass>}:]
\index{-ddump-<pass> options}
@@ -998,17 +1006,14 @@ Make a debugging dump after pass \tr{<pass>} (may be common enough to
need a short form...). Some of the most useful ones are:
\begin{tabular}{ll}
-\tr{-ddump-rif2hs} & reader output (earliest stuff in the compiler) \\
-\tr{-ddump-rn4} & renamer output \\
+\tr{-ddump-rdr} & reader output (earliest stuff in the compiler) \\
+\tr{-ddump-rn} & renamer output \\
\tr{-ddump-tc} & typechecker output \\
\tr{-ddump-deriv} & derived instances \\
\tr{-ddump-ds} & desugarer output \\
\tr{-ddump-simpl} & simplifier output (Core-to-Core passes) \\
- & (and don't forget \tr{-O}, too!) \\
\tr{-ddump-stranal} & strictness analyser output \\
- & (only works in conjunction with -ddump-simpl)\\
\tr{-ddump-occur-anal} & `occurrence analysis' output \\
- & (only works in conjunction with -ddump-simpl)\\
\tr{-ddump-spec} & dump specialisation info \\
\tr{-ddump-stg} & output of STG-to-STG passes \\
\tr{-ddump-absC} & {\em un}flattened Abstract~C \\
@@ -1016,8 +1021,8 @@ need a short form...). Some of the most useful ones are:
\tr{-ddump-realC} & same as what goes to the C compiler \\
\tr{-ddump-asm} & assembly language from the native-code generator \\
\end{tabular}
-\index{-ddump-rif2hs option}%
-\index{-ddump-rn4 option}%
+\index{-ddump-rdr option}%
+\index{-ddump-rn option}%
\index{-ddump-tc option}%
\index{-ddump-deriv option}%
\index{-ddump-ds option}%
@@ -1060,10 +1065,10 @@ individual type variables) is displayed.
\item[\tr{-ddump-raw-asm}:]
\index{-ddump-raw-asm option}
Dump out the assembly-language stuff, before the ``mangler'' gets it.
-
-\item[\tr{-dgc-debug}:]
-\index{-dgc-debug option}
-Enables some debugging code related to the garbage-collector.
+%
+%\item[\tr{-dgc-debug}:]
+%\index{-dgc-debug option}
+%Enables some debugging code related to the garbage-collector.
\end{description}
%ToDo: -ddump-asm-insn-counts
@@ -1168,38 +1173,38 @@ Main.skip2{-r1L6-} =
trademark of Peyton Jones Enterprises, plc.)
%----------------------------------------------------------------------
-\subsubsection[arity-checking]{Options to insert arity-checking code}
-\index{arity checking}
-
-The \tr{-darity-checks}\index{-darity-checks option} option inserts
-code to check for arity violations. Unfortunately, it's not that
-simple: you have to link with a prelude that was also built with arity
-checks. If you have one, then great; otherwise...
-
-The \tr{-darity-checks-C-only}\index{-darity-checks-C-only option}
-option inserts the self-same arity checking code into \tr{.hc} files,
-but doesn't compile it into the \tr{.o} files. We use this flag with
-the \tr{-keep-hc-file-too}\index{-keep-hc-file-too option}, where we
-are keeping \tr{.hc} files around for debugging purposes.
+%\subsubsection[arity-checking]{Options to insert arity-checking code}
+%\index{arity checking}
+%
+%The \tr{-darity-checks}\index{-darity-checks option} option inserts
+%code to check for arity violations. Unfortunately, it's not that
+%simple: you have to link with a prelude that was also built with arity
+%checks. If you have one, then great; otherwise...
+%
+%The \tr{-darity-checks-C-only}\index{-darity-checks-C-only option}
+%option inserts the self-same arity checking code into \tr{.hc} files,
+%but doesn't compile it into the \tr{.o} files. We use this flag with
+%the \tr{-keep-hc-file-too}\index{-keep-hc-file-too option}, where we
+%are keeping \tr{.hc} files around for debugging purposes.
%----------------------------------------------------------------------
-\subsubsection[omit-checking]{Options to omit checking code}
-\index{omitting runtime checks}
-
-By default, the GHC system emits all possible not-too-expensive
-runtime checking code. If you are brave or experimenting, you might
-want to turn off some of this (not recommended):
-
-\begin{tabular}{ll}
--dno-black-holing & won't buy you much (even if it works) \\
--dno-updates & you're crazy if you do this \\
--dno-stk-stubbing & omit stack stubbing (NOT DONE YET) \\
-\end{tabular}
-\index{-dno-black-holing option}%
-\index{-dno-updates option}%
-\index{-dno-stk-stubbing option}
-
-Warning: all very lightly tested, if at all...
+%\subsubsection[omit-checking]{Options to omit checking code}
+%\index{omitting runtime checks}
+%
+%By default, the GHC system emits all possible not-too-expensive
+%runtime checking code. If you are brave or experimenting, you might
+%want to turn off some of this (not recommended):
+%
+%\begin{tabular}{ll}
+%-dno-black-holing & won't buy you much (even if it works) \\
+%-dno-updates & you're crazy if you do this \\
+%-dno-stk-stubbing & omit stack stubbing (NOT DONE YET) \\
+%\end{tabular}
+%\index{-dno-black-holing option}%
+%\index{-dno-updates option}%
+%\index{-dno-stk-stubbing option}
+%
+%Warning: all very lightly tested, if at all...
%% %************************************************************************
%% %* *
diff --git a/ghc/docs/users_guide/intro.lit b/ghc/docs/users_guide/intro.lit
index 4a85d28a9a..82b6e93f95 100644
--- a/ghc/docs/users_guide/intro.lit
+++ b/ghc/docs/users_guide/intro.lit
@@ -1,11 +1,11 @@
%
-% $Header: /srv/cvs/cvs.haskell.org/fptools/ghc/docs/users_guide/Attic/intro.lit,v 1.1 1996/01/08 20:25:10 partain Exp $
+% $Header: /srv/cvs/cvs.haskell.org/fptools/ghc/docs/users_guide/Attic/intro.lit,v 1.2 1996/07/25 20:48:26 partain Exp $
%
\section[introduction-GHC]{Introduction to GHC}
This is a guide to using the Glasgow Haskell compilation (GHC) system.
-It is a batch compiler for the Haskell~1.2 language, with support for
-various extensions, including the DRAFT 1.3 I/O proposal.
+It is a batch compiler for the Haskell~1.3 language, with support for
+various Glasgow-only extensions.
Many people will use GHC very simply: compile some
modules---\tr{ghc -c -O Foo.hs Bar.hs}; and link them---
@@ -30,8 +30,8 @@ material in \sectionref{compiler-tutorial} may help.
On the World-Wide Web, there are several URLs of likely interest:
\begin{display}
-GHC home page -- http://www.dcs.glasgow.ac.uk/fp/software/ghc.html
-Glasgow FP group page -- http://www.dcs.glasgow.ac.uk/fp/
+GHC home page -- http://www.dcs.gla.ac.uk/fp/software/ghc/
+Glasgow FP group page -- http://www.dcs.gla.ac.uk/fp/
comp.lang.functional FAQ -- http://www.cs.nott.ac.uk/Department/Staff/mpj/faq.html
programming language research page --
http://www.cs.cmu.edu/afs/cs.cmu.edu/user/mleone/web/language-research.html
@@ -41,29 +41,53 @@ join, as you feel is appropriate.
\begin{description}
\item[glasgow-haskell-users:]
This list is for GHC users to chat among themselves. Subscribe by
-sending mail to \tr{glasgow-haskell-users-request@dcs.glasgow.ac.uk}.
-Messages for the list go to \tr{glasgow-haskell-users}.
+sending mail to \tr{majordomo@dcs.gla.ac.uk}, with
+a message body (not header) like this:
+\begin{verbatim}
+subscribe glasgow-haskell-users My Name <m.y.self@bigbucks.com>
+\end{verbatim}
+(The last bit is your all-important e-mail address, of course.)
+
+To communicate with your fellow users, send mail to \tr{glasgow-haskell-users}.
+
+To contact the list administrator, send mail to
+\tr{glasgow-haskell-users-request}. An archive of the list is
+available on the Web at:
+\begin{verbatim}
+http://www.dcs.gla.ac.uk/mail-www/glasgow-haskell-users
+\end{verbatim}
\item[glasgow-haskell-bugs:]
Send bug reports for GHC to this address! The sad and lonely people
who subscribe to this list will muse upon what's wrong and what you
might do about it.
-Subscribe via \tr{glasgow-haskell-bugs-request@dcs.glasgow.ac.uk}.
+Subscribe via \tr{majordomo@dcs.gla.ac.uk} with:
+\begin{verbatim}
+subscribe glasgow-haskell-bugs My Name <m.y.self@hackers.r.us>
+\end{verbatim}
+
+Again, you may contact the list administrator at
+\tr{glasgow-haskell-bugs-request}.
+And, yes, an archive of the list is
+available on the Web at:
+\begin{verbatim}
+http://www.dcs.gla.ac.uk/mail-www/glasgow-haskell-bugs
+\end{verbatim}
\end{description}
There is also the general Haskell mailing list. Subscribe by sending
-email to \tr{haskell-request@dcs.glasgow.ac.uk} or
-\tr{haskell-request@cs.yale.edu}, whichever is geographically closer
-to you.
+email to \tr{majordomo@dcs.gla.ac.uk}, with the usual message body:
+\begin{verbatim}
+subscribe haskell My Name <m.y.self@fp.rules.ok.org>
+\end{verbatim}
Some Haskell-related discussion takes place in the Usenet newsgroup
-\tr{comp.lang.functional}. (But note: news propagation to Glasgow can
-be terrible. That's one reason Glaswegians aren't too active in
-c.f.l.)
+\tr{comp.lang.functional}. (But note: news is basically dead at Glasgow.
+That's one reason Glaswegians aren't too active in c.f.l.)
The main anonymous-FTP site for Glasgow Haskell is
-\tr{ftp.dcs.glasgow.ac.uk}, in \tr{pub/haskell/glasgow/}.
+\tr{ftp.dcs.gla.ac.uk}, in \tr{pub/haskell/glasgow/}.
``Important'' bits are mirrored at other Haskell archive sites (and we
have their stuff, too).
diff --git a/ghc/docs/users_guide/libraries.lit b/ghc/docs/users_guide/libraries.lit
index dbe7b00a1b..891d9b1d5e 100644
--- a/ghc/docs/users_guide/libraries.lit
+++ b/ghc/docs/users_guide/libraries.lit
@@ -9,13 +9,18 @@
We intend to provide more and more ready-to-use Haskell code, so that
every program doesn't have to invent everything from scratch.
-At the moment, we supply a part of the HBC library, as well as the
-beginnings of one of our own (``GHC library'').
-
If you provide a \tr{-syslib <name>}\index{-syslib <name> option} option,
then the interfaces for that library will come into scope (and may be
\tr{import}ed), and the code will be added in at link time.
+We supply part of the HBC library (\tr{-syslib hbc}), as well as one
+of our own (\tr{-syslib ghc}), one for an interface to POSIX routines
+(\tr{-syslib posix}), and one of contributed stuff off the net, mostly
+numerical (\tr{-syslib contrib}).
+
+If you have Haggis (our GUI X~toolkit for Haskell), it probably works
+with a \tr{-syslib haggis} flag.
+
%************************************************************************
%* *
\subsection[GHC-library]{The GHC system library}
@@ -48,8 +53,18 @@ unitBag :: elt -> Bag elt
unionBags :: Bag elt -> Bag elt -> Bag elt
unionManyBags :: [Bag elt] -> Bag elt
+consBag :: elt -> Bag elt -> Bag elt
snocBag :: Bag elt -> elt -> Bag elt
+concatBag :: Bag (Bag a) -> Bag a
+mapBag :: (a -> b) -> Bag a -> Bag b
+
+foldBag :: (r -> r -> r) -- Replace TwoBags with this; should be associative
+ -> (a -> r) -- Replace UnitBag with this
+ -> r -- Replace EmptyBag with this
+ -> Bag a
+ -> r
+
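+-- A sketch of use, given the signatures above:
+--   sumBag :: Bag Int -> Int
+--   sumBag = foldBag (+) id 0
+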
elemBag :: Eq elt => elt -> Bag elt -> Bool
isEmptyBag :: Bag elt -> Bool
filterBag :: (elt -> Bool) -> Bag elt -> Bag elt
@@ -78,7 +93,7 @@ to kill you.'' --JSM]
mkBS :: [Int] -> BitSet
listBS :: BitSet -> [Int]
emptyBS :: BitSet
-singletonBS :: Int -> BitSet
+unitBS :: Int -> BitSet
unionBS :: BitSet -> BitSet -> BitSet
minusBS :: BitSet -> BitSet -> BitSet
@@ -109,7 +124,7 @@ Guess what? The implementation uses balanced trees.
\begin{verbatim}
-- BUILDING
emptyFM :: FiniteMap key elt
-singletonFM :: key -> elt -> FiniteMap key elt
+unitFM :: key -> elt -> FiniteMap key elt
listToFM :: Ord key => [(key,elt)] -> FiniteMap key elt
-- In the case of duplicates, the last is taken
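+-- e.g. (a sketch): listToFM [(1,'a'),(1,'b')] is the same map as unitFM 1 'b'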
@@ -197,19 +212,18 @@ intersectingLists :: Eq a => [a] -> [a] -> Bool
%* *
%************************************************************************
-Note: a \tr{Maybe} type is nearly inevitable in Haskell~1.3.
-You should use this module with \tr{-fhaskell-1.3}.
+The \tr{Maybe} type itself is in the Haskell~1.3 prelude. Moreover,
+the required \tr{Maybe} library provides many useful functions on
+\tr{Maybe}s. This (old) module provides more.
-Two non-abstract types:
+An \tr{Either}-like type called \tr{MaybeErr}:
\begin{verbatim}
-data Maybe a = Nothing | Just a -- Prelude; re-exported
data MaybeErr val err = Succeeded val | Failed err
\end{verbatim}
Some operations to do with \tr{Maybe} (some commentary follows):
\begin{verbatim}
maybeToBool :: Maybe a -> Bool -- Nothing => False; Just => True
-catMaybes :: [Maybe a] -> [a]
allMaybes :: [Maybe a] -> Maybe [a]
firstJust :: [Maybe a] -> Maybe a
findJust :: (a -> Maybe b) -> [a] -> Maybe b
@@ -218,6 +232,10 @@ assocMaybe :: Eq a => [(a,b)] -> a -> Maybe b
mkLookupFun :: (key -> key -> Bool) -- Equality predicate
-> [(key,val)] -- The assoc list
-> (key -> Maybe val) -- A lookup fun to use
+mkLookupFunDef :: (key -> key -> Bool) -- Ditto, with a default
+ -> [(key,val)]
+ -> val -- the default
+ -> (key -> val) -- NB: not a Maybe anymore
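+ -- e.g. (a sketch): mkLookupFunDef (==) [(1,"one"),(2,"two")] "?"
+ -- yields a total lookup function; applied to 3 it returns "?"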
-- a monad thing
thenMaybe :: Maybe a -> (a -> Maybe b) -> Maybe b
@@ -226,8 +244,7 @@ failMaybe :: Maybe a
mapMaybe :: (a -> Maybe b) -> [a] -> Maybe [b]
\end{verbatim}
-@catMaybes@ takes a list of @Maybe@s and returns a list of the
-contents of all the @Just@s in it.
+NB: @catMaybes@, which used to be here, is in the Haskell~1.3 libraries.
@allMaybes@ collects a list of @Justs@ into a single @Just@, returning
@Nothing@ if there are any @Nothings@.
@@ -262,72 +279,62 @@ accumulating any errors that occur.
%************************************************************************
%* *
-\subsubsection[PackedString]{The @_PackedString@ type}
+\subsubsection[PackedString]{The @PackedString@ type}
\index{PackedString module (GHC syslib)}
%* *
%************************************************************************
-The type \tr{_PackedString} is built-in, i.e., no
-special action (other than a \tr{-fglasgow-exts} flag) is required to
-use it.
-
-The documentation here describes the {\em built-in} functions.
-
-You may also access this code as a system library and {\em not} use
-the \tr{-fglasgow-exts} flag. Just do \tr{import PackedString},
-heave in your \tr{-syslib ghc}, and drop off the leading underscores
-which you see here.
-
-We still may change this interface (again).
+You need to \tr{import PackedString} and
+heave in your \tr{-syslib ghc}.
The basic type and functions which are available are:
\begin{verbatim}
-data _PackedString
-
-_packString :: [Char] -> _PackedString
-_packStringST :: [Char] -> _ST s _PackedString
-_packCString :: _Addr -> _PackedString
-_packCBytes :: Int -> _Addr -> _PackedString
-_packCBytesST :: Int -> _Addr -> _ST s _PackedString
-_packBytesForC :: [Char] -> _ByteArray Int
-_packBytesForCST :: [Char] -> _ST s (_ByteArray Int)
-_byteArrayToPS :: _ByteArray Int -> _PackedString
-_psToByteArray :: _PackedString -> _ByteArray Int
-
-_unpackPS :: _PackedString -> [Char]
+data PackedString
+
+packString :: [Char] -> PackedString
+packStringST :: [Char] -> ST s PackedString
+packCString :: Addr -> PackedString
+packCBytes :: Int -> Addr -> PackedString
+packCBytesST :: Int -> Addr -> ST s PackedString
+packBytesForC :: [Char] -> ByteArray Int
+packBytesForCST :: [Char] -> ST s (ByteArray Int)
+byteArrayToPS :: ByteArray Int -> PackedString
+psToByteArray :: PackedString -> ByteArray Int
+
+unpackPS :: PackedString -> [Char]
\end{verbatim}
We also provide a wad of list-manipulation-like functions:
\begin{verbatim}
-_nilPS :: _PackedString
-_consPS :: Char -> _PackedString -> _PackedString
-
-_headPS :: _PackedString -> Char
-_tailPS :: _PackedString -> _PackedString
-_nullPS :: _PackedString -> Bool
-_appendPS :: _PackedString -> _PackedString -> _PackedString
-_lengthPS :: _PackedString -> Int
-_indexPS :: _PackedString -> Int -> Char
- -- 0-origin indexing into the string
-_mapPS :: (Char -> Char) -> _PackedString -> _PackedString {-or String?-}
-_filterPS :: (Char -> Bool) -> _PackedString -> _PackedString {-or String?-}
-_foldlPS :: (a -> Char -> a) -> a -> _PackedString -> a
-_foldrPS :: (Char -> a -> a) -> a -> _PackedString -> a
-_takePS :: Int -> _PackedString -> _PackedString
-_dropPS :: Int -> _PackedString -> _PackedString
-_splitAtPS :: Int -> _PackedString -> (_PackedString, _PackedString)
-_takeWhilePS:: (Char -> Bool) -> _PackedString -> _PackedString
-_dropWhilePS:: (Char -> Bool) -> _PackedString -> _PackedString
-_spanPS :: (Char -> Bool) -> _PackedString -> (_PackedString, _PackedString)
-_breakPS :: (Char -> Bool) -> _PackedString -> (_PackedString, _PackedString)
-_linesPS :: _PackedString -> [_PackedString]
-_wordsPS :: _PackedString -> [_PackedString]
-_reversePS :: _PackedString -> _PackedString
-_concatPS :: [_PackedString] -> _PackedString
-
-_substrPS :: _PackedString -> Int -> Int -> _PackedString
- -- pluck out a piece of a _PS
- -- start and end chars you want; both 0-origin-specified
+nilPS :: PackedString
+consPS :: Char -> PackedString -> PackedString
+
+headPS :: PackedString -> Char
+tailPS :: PackedString -> PackedString
+nullPS :: PackedString -> Bool
+appendPS :: PackedString -> PackedString -> PackedString
+lengthPS :: PackedString -> Int
+indexPS :: PackedString -> Int -> Char
+ -- 0-origin indexing into the string
+mapPS :: (Char -> Char) -> PackedString -> PackedString {-or String?-}
+filterPS :: (Char -> Bool) -> PackedString -> PackedString {-or String?-}
+foldlPS :: (a -> Char -> a) -> a -> PackedString -> a
+foldrPS :: (Char -> a -> a) -> a -> PackedString -> a
+takePS :: Int -> PackedString -> PackedString
+dropPS :: Int -> PackedString -> PackedString
+splitAtPS :: Int -> PackedString -> (PackedString, PackedString)
+takeWhilePS:: (Char -> Bool) -> PackedString -> PackedString
+dropWhilePS:: (Char -> Bool) -> PackedString -> PackedString
+spanPS :: (Char -> Bool) -> PackedString -> (PackedString, PackedString)
+breakPS :: (Char -> Bool) -> PackedString -> (PackedString, PackedString)
+linesPS :: PackedString -> [PackedString]
+wordsPS :: PackedString -> [PackedString]
+reversePS :: PackedString -> PackedString
+concatPS :: [PackedString] -> PackedString
+
+substrPS :: PackedString -> Int -> Int -> PackedString
+ -- pluck out a piece of a PS
+ -- start and end chars you want; both 0-origin-specified
\end{verbatim}
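+
+For instance, a minimal sketch using the functions above (compile with
+\tr{-syslib ghc}, as noted):
+\begin{verbatim}
+import PackedString
+
+main :: IO ()
+main = putStr (unpackPS (appendPS (packString "Hello, ")
+                                  (packString "world!\n")))
+\end{verbatim}
+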
%************************************************************************
@@ -373,6 +380,10 @@ ppInterleave :: Pretty -> [Pretty] -> Pretty -- spacing between
ppIntersperse :: Pretty -> [Pretty] -> Pretty -- no spacing between
ppNest :: Int -> Pretty -> Pretty
ppSep :: [Pretty] -> Pretty
+
+ppBracket :: Pretty -> Pretty -- [ ... ] around something
+ppParens :: Pretty -> Pretty -- ( ... ) around something
+ppQuote :: Pretty -> Pretty -- ` ... ' around something
\end{verbatim}
%************************************************************************
@@ -413,27 +424,33 @@ too surprised if this stuff moves/gets-renamed/etc.
\begin{verbatim}
-- general list processing
-forall :: (a -> Bool) -> [a] -> Bool
exists :: (a -> Bool) -> [a] -> Bool
-zipEqual :: [a] -> [b] -> [(a,b)]
-nOfThem :: Int -> a -> [a]
-lengthExceeds :: [a] -> Int -> Bool
+forall :: (a -> Bool) -> [a] -> Bool
isSingleton :: [a] -> Bool
+lengthExceeds :: [a] -> Int -> Bool
+mapAndUnzip :: (a -> (b, c)) -> [a] -> ([b], [c])
+mapAndUnzip3 :: (a -> (b, c, d)) -> [a] -> ([b], [c], [d])
+nOfThem :: Int -> a -> [a]
+zipEqual :: [a] -> [b] -> [(a,b)]
+zipWithEqual :: String -> (a->b->c) -> [a]->[b]->[c]
+zipWith3Equal :: String -> (a->b->c->d) -> [a]->[b]->[c]->[d]
+zipWith4Equal :: String -> (a->b->c->d->e) -> [a]->[b]->[c]->[d]->[e]
+zipLazy :: [a] -> [b] -> [(a,b)] -- lazy in 2nd arg
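+-- e.g. (a sketch): mapAndUnzip (\x -> (x, x > 0)) [1,-2] == ([1,-2], [True,False])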
-- association lists
assoc :: Eq a => String -> [(a, b)] -> a -> b
-- duplicate handling
hasNoDups :: Eq a => [a] -> Bool
-equivClasses :: (a -> a -> _CMP_TAG) -> [a] -> [[a]]
+equivClasses :: (a -> a -> Ordering) -> [a] -> [[a]]
runs :: (a -> a -> Bool) -> [a] -> [[a]]
-removeDups :: (a -> a -> _CMP_TAG) -> [a] -> ([a], [[a]])
+removeDups :: (a -> a -> Ordering) -> [a] -> ([a], [[a]])
-- sorting (don't complain of no choice...)
quicksort :: (a -> a -> Bool) -> [a] -> [a]
sortLt :: (a -> a -> Bool) -> [a] -> [a]
stableSortLt :: (a -> a -> Bool) -> [a] -> [a]
-mergesort :: (a -> a -> _CMP_TAG) -> [a] -> [a]
+mergesort :: (a -> a -> Ordering) -> [a] -> [a]
mergeSort :: Ord a => [a] -> [a]
naturalMergeSort :: Ord a => [a] -> [a]
mergeSortLe :: Ord a => [a] -> [a]
@@ -461,10 +478,7 @@ mapAccumB :: (accl -> accr -> x -> (accl, accr,y))
-> (accl, accr, [y])
-- comparisons
-cmpString :: String -> String -> _CMP_TAG
-
--- this type is built-in
-data _CMP_TAG = _LT | _EQ | _GT
+cmpString :: String -> String -> Ordering
-- pairs
applyToPair :: ((a -> c), (b -> d)) -> (a, b) -> (c, d)
@@ -500,7 +514,7 @@ Readline library. As such, you will need to look at the GNU
documentation (and have a \tr{libreadline.a} file around somewhere...)
You'll need to link any Readlining program with \tr{-lreadline -ltermcap},
-besides the usual \tr{-syslib ghc} (and \tr{-fhaskell-1.3}).
+besides the usual \tr{-syslib ghc}.
The main function you'll use is:
\begin{verbatim}
@@ -535,7 +549,7 @@ will see in the GNU readline documentation.)
The \tr{Regex} library provides quite direct interface to the GNU
regular-expression library, for doing manipulation on
-\tr{_PackedString}s. You probably need to see the GNU documentation
+\tr{PackedString}s. You probably need to see the GNU documentation
if you are operating at this level.
The datatypes and functions that \tr{Regex} provides are:
@@ -553,18 +567,18 @@ data REmatch
-- matched inside a string, e.g.
--
-- matching "reg(exp)" "a regexp" returns the pair (5,7) for the
--- (exp) group. (_PackedString indices start from 0)
+-- (exp) group. (PackedString indices start from 0)
type GroupBounds = (Int, Int)
re_compile_pattern
- :: _PackedString -- pattern to compile
+ :: PackedString -- pattern to compile
-> Bool -- True <=> assume single-line mode
-> Bool -- True <=> case-insensitive
-> PrimIO PatBuffer
re_match :: PatBuffer -- compiled regexp
- -> _PackedString -- string to match
+ -> PackedString -- string to match
-> Int -- start position
-> Bool -- True <=> record results in registers
-> PrimIO (Maybe REmatch)
@@ -575,23 +589,23 @@ re_match :: PatBuffer -- compiled regexp
-- into one massive heap chunk, but load (smaller chunks) on demand.
re_match2 :: PatBuffer -- 2-string version
- -> _PackedString
- -> _PackedString
+ -> PackedString
+ -> PackedString
-> Int
-> Int
-> Bool
-> PrimIO (Maybe REmatch)
re_search :: PatBuffer -- compiled regexp
- -> _PackedString -- string to search
+ -> PackedString -- string to search
-> Int -- start index
-> Int -- stop index
-> Bool -- True <=> record results in registers
-> PrimIO (Maybe REmatch)
re_search2 :: PatBuffer -- Double buffer search
- -> _PackedString
- -> _PackedString
+ -> PackedString
+ -> PackedString
-> Int -- start index
-> Int -- range (?)
-> Int -- stop index
@@ -600,68 +614,68 @@ re_search2 :: PatBuffer -- Double buffer search
\end{verbatim}
The \tr{MatchPS} module provides Perl-like ``higher-level'' facilities
-to operate on \tr{_PackedStrings}. The regular expressions in
+to operate on \tr{PackedStrings}. The regular expressions in
question are in Perl syntax. The ``flags'' on various functions can
include: \tr{i} for case-insensitive, \tr{s} for single-line mode, and
\tr{g} for global. (It's probably worth your time to peruse the
source code...)
\begin{verbatim}
-matchPS :: _PackedString -- regexp
- -> _PackedString -- string to match
+matchPS :: PackedString -- regexp
+ -> PackedString -- string to match
-> [Char] -- flags
-> Maybe REmatch -- info about what matched and where
-searchPS :: _PackedString -- regexp
- -> _PackedString -- string to match
+searchPS :: PackedString -- regexp
+ -> PackedString -- string to match
-> [Char] -- flags
-> Maybe REmatch
-- Perl-like match-and-substitute:
-substPS :: _PackedString -- regexp
- -> _PackedString -- replacement
+substPS :: PackedString -- regexp
+ -> PackedString -- replacement
-> [Char] -- flags
- -> _PackedString -- string
- -> _PackedString
+ -> PackedString -- string
+ -> PackedString
-- same as substPS, but no prefix and suffix:
-replacePS :: _PackedString -- regexp
- -> _PackedString -- replacement
+replacePS :: PackedString -- regexp
+ -> PackedString -- replacement
-> [Char] -- flags
- -> _PackedString -- string
- -> _PackedString
+ -> PackedString -- string
+ -> PackedString
-match2PS :: _PackedString -- regexp
- -> _PackedString -- string1 to match
- -> _PackedString -- string2 to match
+match2PS :: PackedString -- regexp
+ -> PackedString -- string1 to match
+ -> PackedString -- string2 to match
-> [Char] -- flags
-> Maybe REmatch
-search2PS :: _PackedString -- regexp
- -> _PackedString -- string to match
- -> _PackedString -- string to match
+search2PS :: PackedString -- regexp
+ -> PackedString -- string to match
+ -> PackedString -- string to match
-> [Char] -- flags
-> Maybe REmatch
-- functions to pull the matched pieces out of an REmatch:
getMatchesNo :: REmatch -> Int
-getMatchedGroup :: REmatch -> Int -> _PackedString -> _PackedString
-getWholeMatch :: REmatch -> _PackedString -> _PackedString
-getLastMatch :: REmatch -> _PackedString -> _PackedString
-getAfterMatch :: REmatch -> _PackedString -> _PackedString
+getMatchedGroup :: REmatch -> Int -> PackedString -> PackedString
+getWholeMatch :: REmatch -> PackedString -> PackedString
+getLastMatch :: REmatch -> PackedString -> PackedString
+getAfterMatch :: REmatch -> PackedString -> PackedString
-- (reverse) brute-force string matching;
-- Perl equivalent is index/rindex:
-findPS, rfindPS :: _PackedString -> _PackedString -> Maybe Int
+findPS, rfindPS :: PackedString -> PackedString -> Maybe Int
-- Equivalent to Perl "chop" (off the last character, if any):
-chopPS :: _PackedString -> _PackedString
+chopPS :: PackedString -> PackedString
-- matchPrefixPS: tries to match as much as possible of strA starting
-- from the beginning of strB (handy when matching fancy literals in
-- parsers):
-matchPrefixPS :: _PackedString -> _PackedString -> Int
+matchPrefixPS :: PackedString -> PackedString -> Int
\end{verbatim}
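+
+For example, a small sketch (using @packString@ from the
+\tr{PackedString} module; the \tr{"i"} flag requests a case-insensitive
+match, as described above):
+\begin{verbatim}
+found :: Maybe REmatch
+found = matchPS (packString "world") (packString "Hello World") "i"
+\end{verbatim}
+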
%************************************************************************
@@ -678,7 +692,7 @@ matchPrefixPS :: _PackedString -> _PackedString -> Int
(Darren Moffat supplied the network-interface toolkit.)
Your best bet for documentation is to look at the code---really!---
-normally in \tr{ghc/lib/ghc/{BSD,Socket,SocketPrim}.lhs}.
+normally in \tr{hslibs/ghc/src/{BSD,Socket,SocketPrim}.lhs}.
The \tr{BSD} module provides functions to get at system-database info;
pretty straightforward if you're into this sort of thing:
@@ -749,32 +763,6 @@ modules that GHC does not support (because they require HBC-specific
extensions) are omitted.
\begin{description}
-\item[\tr{Either}:]
-\index{Either module (HBC library)}%
-A binary sum data type:
-\begin{verbatim}
-data Either a b = Left a | Right b
-\end{verbatim}
-The constructor \tr{Left} is typically used for errors; it can be
-renamed to \tr{Wrong} on import.
-
-\item[\tr{Maybe}:]
-\index{Maybe module (HBC library)}%
-A type for failure or success:
-\begin{verbatim}
-data Maybe a = Nothing | Just a
-thenM :: Maybe a -> (a -> Maybe b) -> Maybe b
- -- apply a function that may fail
-\end{verbatim}
-
-\item[\tr{Option}:]
-\index{Option module (HBC library)}%
-An alias for \tr{Maybe}:
-\begin{verbatim}
-data Option a = None | Some a
-thenO :: Option a -> (a -> Option b) -> Option b
-\end{verbatim}
-
\item[\tr{ListUtil}:]
\index{ListUtil module (HBC library)}%
Various useful functions involving lists that are missing from the
@@ -810,7 +798,7 @@ groupEq :: (a->a->Bool) -> [a] -> [[a]]
-- group list elements according to an equality predicate
group :: (Eq a) => [a] -> [[a]]
-- group according to} ==
-readListLazily :: (Text a) => String -> [a]
+readListLazily :: (Read a) => String -> [a]
-- read a list in a lazy fashion
\end{verbatim}
@@ -884,7 +872,7 @@ data Word -- 32 bit quantity
instance Bits Byte, Bits Short, Bits Word
instance Eq Byte, Eq Short, Eq Word
instance Ord Byte, Ord Short, Ord Word
-instance Text Byte, Text Short, Text Word
+instance Show Byte, Show Short, Show Word
instance Num Byte, Num Short, Num Word
wordToShorts :: Word -> [Short] -- convert a Word to two Short
wordToBytes :: Word -> [Byte] -- convert a Word to four Byte
@@ -974,7 +962,7 @@ sepBy1 :: Parser a b -> Parser a c -> Parser a [b]
-- Non-empty sequence of items separated by something
sepBy :: Parser a b -> Parser a c -> Parser a [b]
-- Sequence of items separated by something
-lit :: (Eq a, Text a) => a -> Parser [a] a
+lit :: (Eq a, Show a) => a -> Parser [a] a
-- Recognise a literal token from a list of tokens
litp :: String -> (a->Bool) -> Parser [a] a
-- Recognise a token with a predicate.
@@ -986,7 +974,7 @@ token :: (a -> Either String (b, a)) -> Parser a b
parse :: Parser a b -> a -> Either ([String], a) [(b, a)]
-- Do a parse. Return either error (possible tokens and rest
-- of tokens) or all possible parses.
-sParse :: (Text a) => (Parser [a] b) -> [a] -> Either String b
+sParse :: (Show a) => (Parser [a] b) -> [a] -> Either String b
-- Simple parse. Return error message or result.
\end{verbatim}
@@ -1045,3 +1033,43 @@ instance ... -- All reasonable instances.
isInteger :: Number -> Bool -- Test if a Number is an integer.
\end{verbatim}
\end{description}
+
+%************************************************************************
+%* *
+\subsection[contrib-library]{The `contrib' system library}
+\index{contrib system library}
+\index{system library, contrib}
+%* *
+%************************************************************************
+
+Just for a bit of fun, we took all the old contributed ``Haskell
+library'' code---Stephen J.~Bevan being the main hero---converted it
+to Haskell~1.3, and heaved it into a \tr{contrib} system library. It is
+mostly code for numerical methods (@SetMap@ is an exception); we have
+{\em no idea} whether it is any good or not.
+
+The modules provided are:
+@Adams_Bashforth_Approx@,
+@Adams_Predictor_Corrector_Approx@,
+@Choleski_Factorization@,
+@Crout_Reduction@,
+@Cubic_Spline@,
+@Fixed_Point_Approx@,
+@Gauss_Seidel_Iteration@,
+@Hermite_Interpolation@,
+@Horner@,
+@Jacobi_Iteration@,
+@LLDecompMethod@,
+@Least_Squares_Fit@,
+@Matrix_Ops@,
+@Neville_Iterated_Interpolation@,
+@Newton_Cotes@,
+@Newton_Interpolatory_Divided_Difference@,
+@Newton_Raphson_Approx@,
+@Runge_Kutta_Approx@,
+@SOR_Iteration@,
+@Secant_Approx@,
+@SetMap@,
+@Steffensen_Approx@,
+@Taylor_Approx@, and
+@Vector_Ops@.
diff --git a/ghc/docs/users_guide/parallel.lit b/ghc/docs/users_guide/parallel.lit
index 49c18617ed..9d0e7c89fb 100644
--- a/ghc/docs/users_guide/parallel.lit
+++ b/ghc/docs/users_guide/parallel.lit
@@ -22,10 +22,9 @@ optional---the user wants something done!
A Concurrent Haskell program implies multiple `threads' running within
a single Unix process on a single processor.
-Simon Peyton Jones and Sigbjorn Finne have a paper available,
-``Concurrent Haskell: preliminary version.''
-(draft available via \tr{ftp}
-from \tr{ftp.dcs.gla.ac.uk/pub/glasgow-fp/drafts}).
+You will find at least one paper about Concurrent Haskell hanging off
+of Simon Peyton Jones's Web page:
+\tr{http://www.dcs.gla.ac.uk/~simonpj/}.
Parallel Haskell is about {\em speed}---spawning threads onto multiple
processors so that your program will run faster. The `threads'
@@ -35,9 +34,11 @@ get the job done more quickly by sequential execution, then fine.
A Parallel Haskell program implies multiple processes running on
multiple processors, under a PVM (Parallel Virtual Machine) framework.
-Parallel Haskell is new with GHC 0.26; it is more about ``research
-fun'' than about ``speed.'' That will change. There is no paper about
-Parallel Haskell. That will change, too.
+Parallel Haskell is still relatively new; it is more about ``research
+fun'' than about ``speed.'' That will change.
+
+Again, check Simon's Web page for publications about Parallel Haskell
+(including ``GUM'', the key bits of the runtime system).
Some details about Concurrent and Parallel Haskell follow.
@@ -67,8 +68,6 @@ collection of useful concurrency abstractions, including those
mentioned in the ``concurrent paper''.
Just put \tr{import Concurrent} into your modules, and away you go.
-NB: intended for use with the \tr{-fhaskell-1.3} flag.
-
To create a ``required thread'':
\begin{verbatim}
@@ -80,37 +79,33 @@ and ``M-Vars'', which are two flavours of {\em synchronising variables}.
\index{synchronising variables (Glasgow extension)}
\index{concurrency -- synchronising variables}
-\tr{_IVars}\index{_IVars (Glasgow extension)} are write-once
+\tr{IVars}\index{IVars (Glasgow extension)} are write-once
variables. They start out empty, and any threads that attempt to read
them will block until they are filled. Once they are written, any
blocked threads are freed, and additional reads are permitted.
-Attempting to write a value to a full \tr{_IVar} results in a runtime
+Attempting to write a value to a full \tr{IVar} results in a runtime
error. Interface:
\begin{verbatim}
-type IVar a = _IVar a -- more convenient name
-
-newIVar :: IO (_IVar a)
-readIVar :: _IVar a -> IO a
-writeIVar :: _IVar a -> a -> IO ()
+newIVar :: IO (IVar a)
+readIVar :: IVar a -> IO a
+writeIVar :: IVar a -> a -> IO ()
\end{verbatim}
-\tr{_MVars}\index{_MVars (Glasgow extension)} are rendezvous points,
+\tr{MVars}\index{MVars (Glasgow extension)} are rendezvous points,
mostly for concurrent threads. They begin empty, and any attempt to
-read an empty \tr{_MVar} blocks. When an \tr{_MVar} is written, a
-single blocked thread may be freed. Reading an \tr{_MVar} toggles its
+read an empty \tr{MVar} blocks. When an \tr{MVar} is written, a
+single blocked thread may be freed. Reading an \tr{MVar} toggles its
state from full back to empty. Therefore, any value written to an
-\tr{_MVar} may only be read once. Multiple reads and writes are
+\tr{MVar} may only be read once. Multiple reads and writes are
allowed, but there must be at least one read between any two
writes. Interface:
\begin{verbatim}
-type MVar a = _MVar a -- more convenient name
-
-newEmptyMVar :: IO (_MVar a)
-newMVar :: a -> IO (_MVar a)
-takeMVar :: _MVar a -> IO a
-putMVar :: _MVar a -> a -> IO ()
-readMVar :: _MVar a -> IO a
-swapMVar :: _MVar a -> a -> IO a
+newEmptyMVar :: IO (MVar a)
+newMVar :: a -> IO (MVar a)
+takeMVar :: MVar a -> IO a
+putMVar :: MVar a -> a -> IO ()
+readMVar :: MVar a -> IO a
+swapMVar :: MVar a -> a -> IO a
\end{verbatim}
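+
+A tiny sketch using this interface (one thread writes, then reads back):
+\begin{verbatim}
+example :: IO ()
+example =
+    newEmptyMVar            >>= \ v ->
+    putMVar v "rendezvous"  >>
+    takeMVar v              >>= \ s ->
+    putStr s
+\end{verbatim}
+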
A {\em channel variable} (@CVar@) is a one-element channel, as
@@ -157,20 +152,20 @@ nmergeIO :: [[a]] -> IO [a]
\end{verbatim}
A {\em Sample variable} (@SampleVar@) is slightly different from a
-normal @_MVar@:
+normal @MVar@:
\begin{itemize}
\item Reading an empty @SampleVar@ causes the reader to block
- (same as @takeMVar@ on empty @_MVar@).
+ (same as @takeMVar@ on empty @MVar@).
\item Reading a filled @SampleVar@ empties it and returns value.
(same as @takeMVar@)
\item Writing to an empty @SampleVar@ fills it with a value, and
-potentially, wakes up a blocked reader (same as for @putMVar@ on empty @_MVar@).
+potentially, wakes up a blocked reader (same as for @putMVar@ on empty @MVar@).
\item Writing to a filled @SampleVar@ overwrites the current value.
- (different from @putMVar@ on full @_MVar@.)
+ (different from @putMVar@ on full @MVar@.)
\end{itemize}
\begin{verbatim}
-type SampleVar a = _MVar (Int, _MVar a)
+type SampleVar a = MVar (Int, MVar a)
emptySampleVar :: SampleVar a -> IO ()
newSampleVar :: IO (SampleVar a)
@@ -255,18 +250,12 @@ not generate code to evaluate the addends from left to right.
%* *
%************************************************************************
-The functions \tr{par} and \tr{seq} are really just renamings:
-\begin{verbatim}
-par a b = _par_ a b
-seq a b = _seq_ a b
-\end{verbatim}
-
-The functions \tr{_par_} and \tr{_seq_} are built into GHC, and unfold
+The functions \tr{par} and \tr{seq} are wired into GHC, and unfold
into uses of the \tr{par#} and \tr{seq#} primitives, respectively. If
you'd like to see this with your very own eyes, just run GHC with the
\tr{-ddump-simpl} option. (Anything for a good time...)
-You can use \tr{_par_} and \tr{_seq_} in Concurrent Haskell, though
+You can use \tr{par} and \tr{seq} in Concurrent Haskell, though
I'm not sure why you would want to.
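+
+For example, a sketch (\tr{par e1 e2} sparks the evaluation of \tr{e1}
+and returns \tr{e2}; \tr{seq e1 e2} evaluates \tr{e1} before returning
+\tr{e2}):
+\begin{verbatim}
+parfib :: Int -> Int
+parfib n
+  | n < 2     = 1
+  | otherwise = par n1 (seq n2 (n1 + n2 + 1))
+  where
+    n1 = parfib (n-1)
+    n2 = parfib (n-2)
+\end{verbatim}
+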
%************************************************************************
@@ -390,12 +379,9 @@ PVM ``processors'' your program to run on. (For more details of
all relevant RTS options, please see \sectionref{parallel-rts-opts}.)
In truth, running Parallel Haskell programs and getting information
-out of them (e.g., activity profiles) is a battle with the vagaries of
+out of them (e.g., parallelism profiles) is a battle with the vagaries of
PVM, detailed in the following sections.
-For example: the stdout and stderr from your parallel program run will
-appear in a log file, called something like \tr{/tmp/pvml.NNN}.
-
%************************************************************************
%* *
\subsubsubsection{Dummy's guide to using PVM}
@@ -447,56 +433,54 @@ results---only with ``how parallel'' it was! We want pretty pictures.
Parallelism profiles (\`a la \tr{hbcpp}) can be generated with the
\tr{-q}\index{-q RTS option (concurrent, parallel)} RTS option. The
-per-processor profiling info is dumped into files {\em in your home
-directory} named \tr{<program>.gr}. These are then munged into a
-PostScript picture, which you can then display. For example,
-to run your program \tr{a.out} on 8 processors, then view the
-parallelism profile, do:
+per-processor profiling info is dumped into files named
+\tr{<full-path><program>.gr}. These are then munged into a PostScript picture,
+which you can then display. For example, to run your program
+\tr{a.out} on 8 processors, then view the parallelism profile, do:
\begin{verbatim}
% ./a.out +RTS -N8 -q
-% cd # to home directory
-% grs2gr *.???.gr # combine the 8 .gr files into one
-% gr2ps -O temp.gr # cvt to .ps; output in temp.ps
+% grs2gr *.???.gr > temp.gr # combine the 8 .gr files into one
+% gr2ps -O temp.gr # cvt to .ps; output in temp.ps
% ghostview -seascape temp.ps # look at it!
\end{verbatim}
The scripts for processing the parallelism profiles are distributed
in \tr{ghc/utils/parallel/}.
-%************************************************************************
-%* *
-\subsubsection{Activity profiles}
-\index{activity profiles}
-\index{profiles, activity}
-\index{visualisation tools}
-%* *
-%************************************************************************
-
-You can also use the standard GHC ``cost-centre'' profiling to see how
-much time each PVM ``processor'' spends
-
-No special compilation flags beyond \tr{-parallel} are required to get
-this basic four-activity profile. Just use the \tr{-P} RTS option,
-thusly:
-\begin{verbatim}
-./a.out +RTS -N7 -P # 7 processors
-\end{verbatim}
-
-The above will create files named \tr{<something>.prof} and/or
-\tr{<something>.time} {\em in your home directory}. You can
-process the \tr{.time} files into PostScript using \tr{hp2ps},
-\index{hp2ps}
-as described elsewhere in this guide.
-
-Because of the weird file names, you probably need to use
-\tr{hp2ps} as a filter. Also, you probably want to give \tr{hp2ps}
-a \tr{-t0} flag, so that no ``inconsequential'' data is ignored---in
-parallel-land it's all consequential. So:
-\begin{verbatim}
-% hp2ps -t0 < fooo.001.time > temp.ps
-\end{verbatim}
-
+%$$************************************************************************
+%$$* *
+%$$\subsubsection{Activity profiles}
+%$$\index{activity profiles}
+%$$\index{profiles, activity}
+%$$\index{visualisation tools}
+%$$%$$* *
+%$$%$$************************************************************************
+%$$
+%$$You can also use the standard GHC ``cost-centre'' profiling to see how
+%$$much time each PVM ``processor'' spends
+%$$
+%$$No special compilation flags beyond \tr{-parallel} are required to get
+%$$this basic four-activity profile. Just use the \tr{-P} RTS option,
+%$$thusly:
+%$$\begin{verbatim}
+%$$./a.out +RTS -N7 -P # 7 processors
+%$$\end{verbatim}
+%$$
+%$$The above will create files named \tr{<something>.prof} and/or
+%$$\tr{<something>.time} {\em in your home directory}. You can
+%$$process the \tr{.time} files into PostScript using \tr{hp2ps},
+%$$\index{hp2ps}
+%$$as described elsewhere in this guide.
+%$$
+%$$Because of the weird file names, you probably need to use
+%$$\tr{hp2ps} as a filter. Also, you probably want to give \tr{hp2ps}
+%$$a \tr{-t0} flag, so that no ``inconsequential'' data is ignored---in
+%$$parallel-land it's all consequential. So:
+%$$\begin{verbatim}
+%$$%$$ hp2ps -t0 < fooo.001.time > temp.ps
+%$$\end{verbatim}
+%$$
%$$ The first line of the
%$$ \tr{.qp} file contains the name of the program executed, along with
%$$ any program arguments and thread-specific RTS options. The second
@@ -546,11 +530,12 @@ parallel-land it's all consequential. So:
%* *
%************************************************************************
-The ``garbage-collection statistics'' RTS options can be useful
-for seeing what parallel programs are doing. If you do either
-\tr{+RTS -Sstderr}\index{-Sstderr RTS option} or \tr{+RTS -sstderr},
-then you'll get mutator, garbage-collection, etc., times on standard
-error which, for PVM programs, appears in \tr{/tmp/pvml.nnn}.
+The ``garbage-collection statistics'' RTS options can be useful for
+seeing what parallel programs are doing. If you do either
+\tr{+RTS -Sstderr}\index{-Sstderr RTS option} or \tr{+RTS -sstderr}, then
+you'll get mutator, garbage-collection, etc., times on standard
+error. The standard error of all PEs other than the `main thread'
+appears in \tr{/tmp/pvml.nnn}, courtesy of PVM.
Whether doing \tr{+RTS -Sstderr} or not, a handy way to watch
what's happening overall is: \tr{tail -f /tmp/pvml.nnn}.
@@ -618,13 +603,20 @@ in \tr{ghc/utils/pvm/}.
(PARALLEL ONLY) Limit the number of pending sparks per processor to
\tr{<num>}. The default is 100. A larger number may be appropriate if
your program generates large amounts of parallelism initially.
+
+\item[\tr{-Q<num>}:]
+\index{-Q<num> RTS option (parallel)}
+(PARALLEL ONLY) Set the size of packets transmitted between processors
+to \tr{<num>}. The default is 1024 words. A larger number may be
+appropriate if your machine has a high communication cost relative to
+computation speed.
\end{description}
%************************************************************************
%* *
-\subsubsubsection[parallel-problems]{Potential problems with Parallel Haskell}
-\index{Parallel Haskell---problems}
-\index{problems, Parallel Haskell}
+\subsubsubsection[parallel-problems]{Potential problems with Parallel Haskell}
+\index{Parallel Haskell---problems}
+\index{problems, Parallel Haskell}
%* *
%************************************************************************
diff --git a/ghc/docs/users_guide/prof-compiler-options.lit b/ghc/docs/users_guide/prof-compiler-options.lit
index 0f870b431f..c209b2d54a 100644
--- a/ghc/docs/users_guide/prof-compiler-options.lit
+++ b/ghc/docs/users_guide/prof-compiler-options.lit
@@ -24,11 +24,11 @@ all top-level, exported functions.
{\em All} top-level functions, exported or not, will be automatically
\tr{_scc_}'d.
-% secret!
-%\item[\tr{-caf-all}:]
-%\index{-caf-all option}
-%The costs of all CAFs in a module are usually attributed to one
-%``big'' CAF cost-centre. With this option, all CAFs get their own cost-centre.
+\item[\tr{-caf-all}:]
+\index{-caf-all option}
+The costs of all CAFs in a module are usually attributed to one
+``big'' CAF cost-centre. With this option, all CAFs get their own cost-centre.
+An ``if all else fails'' option...
%\item[\tr{-dict-all}:]
%\index{-dict-all option}
diff --git a/ghc/docs/users_guide/prof-rts-options.lit b/ghc/docs/users_guide/prof-rts-options.lit
index 12325d5ba0..cdbe3ed077 100644
--- a/ghc/docs/users_guide/prof-rts-options.lit
+++ b/ghc/docs/users_guide/prof-rts-options.lit
@@ -9,7 +9,7 @@ what you want to profile (e.g., time and/or space), and how you wish
the collected data to be reported. You also may wish to set the
sampling interval used in time profiling.
-Executive summary: \tr{./a.out +RTS -p} produces a time profile in
+Executive summary: \tr{./a.out +RTS -pT} produces a time profile in
\tr{a.out.prof}; \tr{./a.out +RTS -hC} produces space-profiling
info which can be mangled by \tr{hp2ps} and viewed with \tr{ghostview}
(or equivalent).
@@ -23,13 +23,13 @@ Profiling runtime flags are passed to your program between the usual
\index{-P<sort> RTS option (profiling)}
\index{time profile}
\index{serial time profile}
-The \tr{-p} option produces a standard {\em time profile} report.
+The \tr{-p?} option produces a standard {\em time profile} report.
It is written into the file \pl{<program>}\tr{.prof}.
-The \tr{-P} option produces a more detailed report containing the
+The \tr{-P?} option produces a more detailed report containing the
actual time and allocation data as well. (Not used much.)
-The \tr{-P} option also produces {\em serial time-profiling}
+The \tr{-P?} option also produces {\em serial time-profiling}
information, in the file \pl{<program>}\tr{.time}. This can be
converted into a (somewhat unsatisfactory) PostScript graph using
\tr{hp2ps} (see \Sectionref{hp2ps}).
diff --git a/ghc/docs/users_guide/profiling.lit b/ghc/docs/users_guide/profiling.lit
index 9f55739411..356a60503c 100644
--- a/ghc/docs/users_guide/profiling.lit
+++ b/ghc/docs/users_guide/profiling.lit
@@ -2,13 +2,13 @@
\documentstyle[11pt,literate]{article}
\begin{document}
\title{The Glorious Haskell Compilation System\\ Profiling Guide}
-\author{The AQUA Team (Patrick M. Sansom)\\
+\author{The GHC Team (Patrick M. Sansom)\\
Department of Computing Science\\
University of Glasgow\\
Glasgow, Scotland\\
G12 8QQ\\
\\
-Email: glasgow-haskell-\{users,bugs\}-request\@dcs.glasgow.ac.uk}
+Email: glasgow-haskell-\{users,bugs\}-request\@dcs.gla.ac.uk}
\maketitle
\begin{rawlatex}
\tableofcontents
@@ -25,7 +25,7 @@ execution behaviour, so you can improve it.
%This profiling system is still under development.
%Please e-mail reports of any bugs you discover to
-%\tr{glasgow-haskell-bugs@dcs.glasgow.ac.uk}.
+%\tr{glasgow-haskell-bugs@dcs.gla.ac.uk}.
Any comments, suggestions and/or improvements you have to are welcome.
Recommended ``profiling tricks'' would be especially cool!
@@ -238,7 +238,7 @@ system. Just visit the Glasgow FP Web page...
%
% It is in the GHC distribution in \tr{ghc/docs/papers/profiling.ps},
% or it can be retrieved using ftp from
-% \tr{ftp.dcs.glasgow.ac.uk} (\tr{[130.209.240.50]})
+% \tr{ftp.dcs.gla.ac.uk} (\tr{[130.209.240.50]})
% in the file
% \tr{pub/glasgow-fp/papers/lazy-profiling.ps}.
diff --git a/ghc/docs/users_guide/recomp.lit b/ghc/docs/users_guide/recomp.lit
new file mode 100644
index 0000000000..f9da199194
--- /dev/null
+++ b/ghc/docs/users_guide/recomp.lit
@@ -0,0 +1,41 @@
+%************************************************************************
+%* *
+\section[recomp]{The GHC recompilation checker (and using `make')}
+\index{recompilation checker}
+\index{make and recompilation}
+%* *
+%************************************************************************
+
+The Haskell~1.3 module system (nicely improved, thank you very much)
+requires a substantially different implementation, which we have done
+for GHC~2.01.
+
+We have taken a fairly radical approach and implemented a ``go to the
+horse's mouth'' scheme; that is, when seeking out information about an
+entity \tr{Foo.bar}, GHC will always look in the interface for
+\tr{Foo}---no matter what interface {\em told us} about \tr{Foo.bar}
+(via re-exporting and whatnot).
+
+This ``horse's mouth'' scheme removes some of the most unsavoury
+aspects of GHC~0.2x's behaviour (because of picking up duff
+information about re-exported entities). However, the keen-minded
+user will see that it makes it difficult to maintain
+\tr{make}-friendly dependencies between modules.
+
+Enter the ``recompilation checker'', which you invoke merely by adding
+a \tr{-recomp} option\index{-recomp option} to your GHC command line.
+(You should also create ordinary `make' dependencies; there's an
+example of how to do this in \sectionref{mkdependHS}.)
+
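+For example, a sketch of a typical command line (assuming the usual
+\tr{-c} compile-only flag):
+\begin{verbatim}
+% ghc -c -recomp Foo.lhs
+\end{verbatim}
+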
+GHC will now start ``compiling'' much more often than in the old days,
+but it will frequently bail out quickly, saying the recompile is
+\tr{NOT NEEDED}. What a beautiful sight!
+
+The recompilation checker is not finished. Its main faults are:
+(a)~it doesn't yet do the right things for instance declarations;
+(b)~it doesn't do anything about pragmas (happily, GHC~2.01 doesn't
+produce any); (c)~it has no good solution for mutually-recursive
+modules.
+
+Patrick Sansom has a workshop paper about how all these things should
+be done. Ask him (email: \tr{sansom}) if you want a copy.
diff --git a/ghc/docs/users_guide/runtime_control.lit b/ghc/docs/users_guide/runtime_control.lit
index f44a8367fb..9f4882f592 100644
--- a/ghc/docs/users_guide/runtime_control.lit
+++ b/ghc/docs/users_guide/runtime_control.lit
@@ -21,11 +21,11 @@ When your Haskell program starts up, its RTS extracts
command-line arguments bracketed between \tr{+RTS}\index{+RTS option}
and \tr{-RTS}\index{-RTS option} as its own. For example:
\begin{verbatim}
-% ./a.out -f +RTS -p -S -RTS -h foo bar
+% ./a.out -f +RTS -pT -S -RTS -h foo bar
\end{verbatim}
-The RTS will snaffle \tr{-p -S} for itself,
+The RTS will snaffle \tr{-pT -S} for itself,
and the remaining arguments \tr{-f -h foo bar} will be handed
-to your program when it does a @GetArgs@ I/O request.
+to your program if/when it calls @System.getArgs@.
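+
+For instance, a sketch (assuming the Haskell~1.3 \tr{System} library):
+\begin{verbatim}
+import System (getArgs)
+
+main :: IO ()
+main = getArgs >>= print   -- prints ["-f","-h","foo","bar"] for the run above
+\end{verbatim}
+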
No \tr{-RTS} option is required if the runtime-system options extend
to the end of the command line, as in this example:
@@ -83,11 +83,12 @@ specially, with the output really being sent to \tr{stderr}.
%Note that the same program will typically allocate more space with a
%generational collector than with a non-generational collector.
-The amount of heap allocation will typically increase as the total heap
-size is reduced. The reason for this odd behaviour is that updates of
-promoted-to-old-generation objects may require the extra allocation of a new-generation
-object to ensure that there are never any pointers from the old
-generation to the new generation.
+
+The amount of heap allocation will typically increase as the total
+heap size is reduced. The reason for this odd behaviour is that
+updates of promoted-to-old-generation objects may require the extra
+allocation of a new-generation object to ensure that there are never
+any pointers from the old generation to the new generation.
For some garbage collectors (not including the default one, sadly),
you can convert the \tr{-S} output into a residency graph (in
@@ -189,18 +190,18 @@ recommended for everyday use!
Sound the bell at the start of each (major) garbage collection.
Oddly enough, people really do use this option! Our pal in Durham
-(England), PaulCallaghan, writes: ``Some people here use it for a
+(England), Paul Callaghan, writes: ``Some people here use it for a
variety of purposes---honestly!---e.g., confirmation that the
code/machine is doing something, infinite loop detection, gauging cost
of recently added code. Certain people can even tell what stage [the
program] is in by the beep pattern. But the major use is for annoying
others in the same office...''
-\item[\tr{-I}:]
-Use the ``debugging mini-interpreter'' with sanity-checking; you have
-to have an appropriately-compiled version of the prelude, etc.
-Goes together nicely with GDB (GNU debugger)...
-(OLD, REALLY)
+% \item[\tr{-I}:]
+% Use the ``debugging mini-interpreter'' with sanity-checking; you have
+% to have an appropriately-compiled version of the prelude, etc.
+% Goes together nicely with GDB (GNU debugger)...
+% (OLD, REALLY)
\item[\tr{-r<file>}:]
\index{-r <file> RTS option}
@@ -239,7 +240,7 @@ GHC lets you exercise rudimentary control over the messages printed
when the runtime system ``blows up,'' e.g., on stack overflow.
Simply write some of the following procedures in C and then make sure
-they get linked in, in preference to those in the RTS library:
+they get linked in preference to those in the RTS library:
\begin{description}
\item[\tr{void ErrorHdrHook (FILE *)}:]
\index{ErrorHdrHook}
@@ -278,16 +279,13 @@ For example, here is the ``hooks'' code used by GHC itself:
#define I_ long int
void
-ErrorHdrHook (where)
- FILE *where;
+ErrorHdrHook (FILE *where)
{
fprintf(where, "\n"); /* no "Fail: " */
}
void
-OutOfHeapHook (request_size, heap_size)
- W_ request_size; /* in bytes */
- W_ heap_size; /* in bytes */
+OutOfHeapHook (W_ request_size, W_ heap_size) /* both sizes in bytes */
{
fprintf(stderr, "GHC's heap exhausted;\nwhile trying to
allocate %lu bytes in a %lu-byte heap;\nuse the `-H<size>'
@@ -297,8 +295,7 @@ OutOfHeapHook (request_size, heap_size)
}
void
-StackOverflowHook (stack_size)
- I_ stack_size; /* in bytes */
+StackOverflowHook (I_ stack_size) /* in bytes */
{
fprintf(stderr, "GHC stack-space overflow: current size
%ld bytes.\nUse the `-K<size>' option to increase it.\n",
@@ -306,24 +303,21 @@ StackOverflowHook (stack_size)
}
void
-PatErrorHdrHook (where)
- FILE *where;
+PatErrorHdrHook (FILE *where)
{
fprintf(where, "\n*** Pattern-matching error within GHC!\n\n
This is a compiler bug; please report it to
- glasgow-haskell-bugs@dcs.glasgow.ac.uk.\n\nFail: ");
+ glasgow-haskell-bugs@dcs.gla.ac.uk.\n\nFail: ");
}
void
-PreTraceHook (where)
- FILE *where;
+PreTraceHook (FILE *where)
{
fprintf(where, "\n"); /* not "Trace On" */
}
void
-PostTraceHook (where)
- FILE *where;
+PostTraceHook (FILE *where)
{
fprintf(where, "\n"); /* not "Trace Off" */
}
diff --git a/ghc/docs/users_guide/sooner.lit b/ghc/docs/users_guide/sooner.lit
index 361ea84aa0..52125183c1 100644
--- a/ghc/docs/users_guide/sooner.lit
+++ b/ghc/docs/users_guide/sooner.lit
@@ -62,28 +62,8 @@ remotely-mounted disks; then {\em link} on a slow machine that had
your disks directly mounted.
%----------------------------------------------------------------
-\item[Don't derive \tr{read} for \tr{Text} unnecessarily:]
-When doing \tr{deriving Text},
-use \tr{-fomit-derived-read}\index{-fomit-derived-read option}
-to derive only the \tr{showsPrec} method. Quicker, smaller code.
-
-%----------------------------------------------------------------
-\item[Don't re-export instance declarations:]
-
-(Note: This recommendation totally violates the Haskell language
-standard.)
-
-The Haskell module system dictates that instance declarations are
-exported and re-exported into interface files with considerable gusto.
-In a large system, especially one with mutually-recursive modules,
-this tendency makes your interface files bigger (bad) and decreases
-the chances that changes will be propagated incorrectly (bad).
-
-If you wish, you may use a language-violating option,
-\tr{-fomit-reexported-instances},
-\index{-fomit-reexported-instances option}
-to get just the effect you might expect. It can't help but
-speed things up.
+\item[Don't derive/use \tr{Read} unnecessarily:]
+It's ugly and slow.
%----------------------------------------------------------------
\item[GHC compiles some program constructs slowly:]
@@ -162,7 +142,9 @@ faster''...
to make your program go faster. Compilation time will be slower,
especially with \tr{-O2}.
-At version~0.26, \tr{-O2} is nearly indistinguishable from \tr{-O}.
+At present, \tr{-O2} is nearly indistinguishable from \tr{-O}.
+
+At version 2.01, \tr{-O} is a dodgy proposition, no matter what.
%----------------------------------------------------------------
\item[Compile via C and crank up GCC:] Even with \tr{-O}, GHC tries to
@@ -262,11 +244,6 @@ type signatures; e.g.,:
\begin{verbatim}
% egrep '^[a-z].*::.*=>' *.hi
\end{verbatim}
-
-Note: explicit export lists sometimes ``mask'' overloaded top-level
-functions; i.e., you won't see anything about them in the interface
-file. I sometimes remove my export list temporarily, just to see what
-pops out.
\end{description}
%----------------------------------------------------------------
@@ -488,8 +465,7 @@ option for the extreme case. (``Only unfoldings with zero cost should proceed.''
(Note: I have not been too successful at producing code smaller
than that which comes out with \tr{-O}. WDP 94/12)
-Use \tr{-fomit-derived-read} if you are using a lot of derived
-instances of \tr{Text} (and don't need the read methods).
+Avoid \tr{Read}.
Use \tr{strip} on your executables.
diff --git a/ghc/docs/users_guide/user.lit b/ghc/docs/users_guide/user.lit
index 858a12b814..bf3a3613f3 100644
--- a/ghc/docs/users_guide/user.lit
+++ b/ghc/docs/users_guide/user.lit
@@ -1,14 +1,14 @@
\begin{onlystandalone}
\documentstyle[11pt,literate]{article}
\begin{document}
-\title{The Glorious Glasgow Haskell Compilation System\\ Version~0.26\\ User's Guide}
-\author{The AQUA Team\\
+\title{The Glorious Glasgow Haskell Compilation System\\ Version~2.01\\ User's Guide}
+\author{The GHC Team\\
Department of Computing Science\\
University of Glasgow\\
Glasgow, Scotland\\
G12 8QQ\\
\\
-Email: glasgow-haskell-\{bugs,users\}-request\@dcs.glasgow.ac.uk}
+Email: glasgow-haskell-\{bugs,users\}-request\@dcs.gla.ac.uk}
\maketitle
\begin{rawlatex}
\tableofcontents
@@ -25,7 +25,9 @@ Email: glasgow-haskell-\{bugs,users\}-request\@dcs.glasgow.ac.uk}
\input{libraries.lit}
\input{parallel.lit}
\input{gone_wrong.lit}
+\input{backwards.lit}
\input{vs_haskell.lit}
+\input{recomp.lit}
\input{utils.lit}
\input{ticky.lit}
\input{tutorial.lit}
diff --git a/ghc/docs/users_guide/utils.lit b/ghc/docs/users_guide/utils.lit
index 6ec326e6f9..94516bfd38 100644
--- a/ghc/docs/users_guide/utils.lit
+++ b/ghc/docs/users_guide/utils.lit
@@ -22,7 +22,7 @@ GHC, assuming you name your source files the same as your modules.
Thus:
\begin{verbatim}
HC = ghc
-HCFLAGS = -fhaskell-1.3 -cpp -hi-diffs $(EXTRA_HC_OPTS)
+HCFLAGS = -recomp -cpp -hi-diffs $(EXTRA_HC_OPTS)
SRCS = Main.lhs Foo.lhs Bar.lhs
OBJS = Main.o Foo.o Bar.o
@@ -65,6 +65,11 @@ Now, before you start compiling, and any time you change the
\tr{make cool_pgm}. \tr{mkdependHS} will append the needed
dependencies to your \tr{Makefile}.
+Please note the use of the recompilation checker (the \tr{-recomp}
+\index{-recomp option} flag). Without it, your dependencies will be
+{\em inadequate} to cope with the Haskell~1.3 module system! See
+\sectionref{recomp} for more details about the recompilation checker!
+
A few caveats about this simple scheme: (a)~You may need to compile
some modules explicitly to create their interfaces in the first place
(e.g., \tr{make Bar.o} to create \tr{Bar.hi}). (b)~You may have to
@@ -85,6 +90,8 @@ e.g., \tr{mkdependHS -help}.
%* *
%************************************************************************
+NB: \tr{hstags} is temporarily dead at version~2.01. Sigh.
+
`Tags' is a facility for indexing the definitions of
programming-language things in a multi-file program, and then using
that index to jump around among these definitions.
@@ -133,7 +140,7 @@ Andy Gill and Simon Marlow have written a parser-generator for
Haskell, called \tr{happy}.\index{happy parser generator} \tr{Happy}
is to Haskell what \tr{Yacc} is to C.
-You can get \tr{happy} by FTP from \tr{ftp.dcs.glasgow.ac.uk} in
+You can get \tr{happy} by FTP from \tr{ftp.dcs.gla.ac.uk} in
\tr{pub/haskell/happy}, the file \tr{happy-0.8.tar.gz}.
\tr{Happy} is at its shining best when compiled by GHC.
diff --git a/ghc/docs/users_guide/vs_haskell.lit b/ghc/docs/users_guide/vs_haskell.lit
index 912e2df78c..9061d65d07 100644
--- a/ghc/docs/users_guide/vs_haskell.lit
+++ b/ghc/docs/users_guide/vs_haskell.lit
@@ -1,16 +1,20 @@
%************************************************************************
%* *
-\section[vs-Haskell-defn]{Haskell~1.2 vs.~Glasgow Haskell~0.26: language non-compliance}
-\index{GHC vs the Haskell 1.2 language}
-\index{Haskell 1.2 language vs GHC}
+\section[vs-Haskell-defn]{Haskell~1.3 vs.~Glasgow Haskell~2.01: language non-compliance}
+\index{GHC vs the Haskell 1.3 language}
+\index{Haskell 1.3 language vs GHC}
%* *
%************************************************************************
This section lists Glasgow Haskell infelicities in its implementation
-of Haskell~1.2. See also the ``when things go wrong'' section
+of Haskell~1.3. See also the ``when things go wrong'' section
(\sectionref{wrong}) for information about crashes, space leaks, and
other undesirable phenomena.
+GHC~2.01 does not have a ``Haskell~1.2 compatibility mode.'' If
+enough people yelled loudly and long enough, it could conceivably
+happen...
+
The limitations here are listed in Haskell-Report order (roughly).
%Limitations related to Glasgow extensions (unboxed numbers, etc.) are
%given thereafter (\sectionref{infelicities-Glasgow-exts}).
@@ -23,8 +27,10 @@ The limitations here are listed in Haskell-Report order (roughly).
\begin{description}
%-------------------------------------------------------------------
-\item[Some valid irrefutable patterns are rejected:]
-As syntax errors; just put parentheses around them.
+\item[No @n+k@ patterns:]
+We took them out of GHC, because we thought they weren't going to
+survive into Haskell~1.3. But they did. And we haven't put them
+back. Yet.
%-------------------------------------------------------------------
\item[Very long @String@ constants:]
@@ -39,6 +45,10 @@ option don't mix. The C-preprocessor may munch the backslashes.
\item[Very long literal lists:]
These may tickle a ``yacc stack overflow'' error in the parser.
(It depends on the Yacc used to build your parser.)
+
+%-------------------------------------------------------------------
+\item[Single quotes in module names:]
+It might work, but it's just begging for trouble.
\end{description}
%************************************************************************
@@ -49,30 +59,13 @@ These may tickle a ``yacc stack overflow'' error in the parser.
\begin{description}
%-------------------------------------------------------------------
-\item[Contexts on @data@ declarations are ignored:]
-Not that they do much, anyway... This won't wreck your life.
-(We still [vaguely] plan to add them, however.)
-
-%-------------------------------------------------------------------
-\item[Location of instance declarations is unchecked:]
-We don't check that instance declarations occur either in the module
-where the class is declared or the module where the data type is
-declared. This shouldn't hurt you.
-
-For better or worse, we {\em do} check if you try to declare a Prelude
-instance (Prelude class, Prelude type; e.g., \tr{instance Num Bool})
-in one of your own modules. For some reason, people like to do this!
-(But it is not legal Haskell.)
-
-%-------------------------------------------------------------------
-\item[Derived instances of @Text@ for infix constructors:]
+\item[Derived instances of @Read@ and @Show@ for infix constructors:]
All the carry-on about derived @readsPrec@ and @showsPrec@ for infix
constructors---we don't do it (yet). We treat them the same way as
all other constructors.
%-------------------------------------------------------------------
-\item[Derived instances of @Binary@:]
-We don't. (We don't do anything @Binary@ish.)
+\item[Derived instances for records:] Hmmm.
\end{description}
%************************************************************************
@@ -83,112 +76,9 @@ We don't. (We don't do anything @Binary@ish.)
\begin{description}
%-------------------------------------------------------------------
-\item[Duplicates in a `renaming' list:]
-Are not reported.
-
-%-------------------------------------------------------------------
-\item[Duplicates in an `import' declaration:]
-These are reported as errors, which some might argue they shouldn't
-be. We reckon it's a feature, not a bug.
-
-%-------------------------------------------------------------------
-\item[Export of `renamed' class methods:]
-Willnae work. That is: you import a class, renaming one or more
-methods; then export that class---the renaming of the methods {\em
-will not} propagate.
-
-(Otherwise, `renaming'---disgusting though it may be---should work.)
-
-%-------------------------------------------------------------------
-\item[Fixities/precedences following `renamed' entities that are exported:]
-No chance.
-
-%-------------------------------------------------------------------
-\item[\tr{import Foo ()} vs \tr{import Foo}:]
-GHC cannot tell the difference (!).
-
-Given that the only module on which you might want to do the former is
-\tr{import Prelude ()}, there are probably much bigger gremlins that
-would jump out and bite you if the import {\em did} work. Besides
-which, you can achieve the same result with
-\tr{-fno-implicit-prelude}.\index{-fno-implicit-prelude option}
-
-%-------------------------------------------------------------------
-\item[Some selective import/export checking not done:]
-On selective import and export of type-constructors/classes in
-which the data-constructors/methods are named explicitly:
-it'll work; it's just that every conceivable paranoia
-check won't be done.
-
-%-------------------------------------------------------------------
-\item[Some Prelude entities cannot be hidden:]
-For example, this doesn't work:
-\begin{verbatim}
-import Prelude hiding (readParen)
-\end{verbatim}
-That's because there are a few should-be-hideable Prelude entities
-which need to appear by magic for derived instances. They are
-\tr{(&&)}, \tr{(.)}, \tr{lex}, \tr{map}, \tr{not}, \tr{readParen},
-\tr{showParen}, and \tr{showString}. SIGH.
-
-%-------------------------------------------------------------------
-\item[\tr{M..} exports vs multiply-imported entities:]
-If an entity \tr{foo} is imported from several interfaces, as in...
-\begin{verbatim}
-import A1 (foo); import A2 (foo); import A3 (foo)
-\end{verbatim}
-... and you then do a ``dot dot'' export of \tr{A1} (for example), it
-will be {\em pure luck} if \tr{foo} gets exported. This is very sad.
-
-Workaround: export \tr{foo} explicitly.
-
-%-------------------------------------------------------------------
-\item[\tr{M..} with Prelude interfaces:]
-Doing \tr{Prelude<something>..} in an export list; don't even think
-it.
-
-%-------------------------------------------------------------------
-\item[Export of Prelude types/classes must be explicit:]
-
-If you want to export a data type, type synonym or class from a
-Prelude module (its name starts with `Prelude'), then it must be
-listed explicitly in the export list. If you say:
-
-\begin{verbatim}
-module PreludeMeGently ( PreludeMeGently.. , other_stuff ) where ..
-\end{verbatim}
-
-then the classes/types in \tr{PreludeMeGently} will {\em not} be
-exported; just add them to the export list. (This shortcoming is only
-likely to affect people writing their own Prelude modules.)
-
-%-------------------------------------------------------------------
\item[Can't export primitives types (e.g., \tr{Int#}):]
Don't even try...
-
-%-------------------------------------------------------------------
-\item[Naming errors with \tr{-O} but not without:]
-
-Documentation by example---Consider a module with these imports:
-
-\begin{verbatim}
-... various imports ...
-import Prettyterm -- desired import
-
-import Pretty -- sadly-needed import
-\end{verbatim}
-
-The \tr{import Pretty} is required because it defines a type
-\tr{Pretty.Doc} which is mentioned in \tr{import Prettyterm}.
-(Extremely sad, but them's the rules.)
-
-But without \tr{-O}, GHC uses its \tr{-fuse-get-mentioned-vars} hack
-(for speed), trying to avoid looking at parts of interfaces that have
-no relevance to this module. As it happens, the thing in
-\tr{Prettyterm} that mentions \tr{Pretty.Doc} is not used here, so
-this module will go through without \tr{import Pretty}. Nice, but
-wrong.
\end{description}
%************************************************************************
@@ -199,15 +89,10 @@ wrong.
\begin{description}
%-------------------------------------------------------------------
-% now in glasgow_exts
-%\item[@fromInt@ method in class @Num@:]
-% (Non-standard.) We support it, as does HBC.
-
-%-------------------------------------------------------------------
\item[Very large/small fractional constants:]
(i.e., with a decimal point somewhere) GHC does not check whether these
are out of range (e.g., for a @Float@), and bad things will inevitably
-follow. To be corrected.
+follow. (To be corrected?)
This problem does {\em not} exist for integral constants.
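To illustrate the fractional case, a made-up example such as
\begin{verbatim}
tooBig :: Float
tooBig = 1.0e100    -- far outside the range of a Float
\end{verbatim}
is accepted without complaint, and the value you actually get is
whatever the underlying arithmetic happens to produce.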
@@ -221,8 +106,8 @@ Arguably {\em not} an infelicity, but... Bear in mind that operations
on \tr{Int}, \tr{Float}, and \tr{Double} numbers are {\em unchecked}
for overflow, underflow, and other sad occurrences.
-Use \tr{Integer}, \tr{Rational}, etc., numeric types if this stuff keeps you
-awake at night.
+Use \tr{Integer}, \tr{Rational}, etc., numeric types if this stuff
+keeps you awake at night.
%-------------------------------------------------------------------
\item[Multiply-defined array elements---not checked:]
@@ -230,376 +115,29 @@ This code fragment {\em should} elicit a fatal error, but it does not:
\begin{verbatim}
main = print (array (1,1) [ 1:=2, 1:=3 ])
\end{verbatim}
-
-%-------------------------------------------------------------------
-\item[Support for @Binary@ whatnot:]
-We don't.
\end{description}
%************************************************************************
%* *
-\subsection[infelicities-IO]{Dialogue I/O}
+\subsection[infelicities-Prelude]{In Prelude support}
%* *
%************************************************************************
-Dialogue-style I/O---still the default for GHC---is on its way out
-(see the stuff about ``monadic I/O for Haskell~1.3''), so we probably
-won't fix these shortcomings.
-
\begin{description}
%-------------------------------------------------------------------
-\item[Support for @Dialogue@ I/O:]
-We do not yet support all @Requests@, notably:
-@ReadBinFile@,
-@WriteBinFile@,
-@AppendBinFile@,
-@StatusFile@,
-@ReadBinChan@,
-@AppendBinChan@,
-@StatusChan@,
-@SetEnv@. Also, we do not support the optional I/O @Requests@.
-
-\item[@AppendChan@ and @ReadChan@ requests:]
-The former only works for \tr{stdout} and \tr{stderr}; the
-latter only for \tr{stdin}.
-
-\item[@Echo@ request:]
-We don't do anything at all.
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[infelicities-Prelude]{In Prelude support}
-%* *
-%************************************************************************
+\item[Polymorphic @seq@:]
+Required by the Haskell~1.3 prelude; not done yet. Consequently,
+the @strict@ function doesn't really do what you want either.
-\begin{description}
%-------------------------------------------------------------------
\item[Arbitrary-sized tuples:]
-Plain old tuples of arbitrary size {\em do} work.
-Note that lots
-of overloading can give rise to large tuples ``under the hood'' of
-your program.
+Plain old tuples of arbitrary size {\em do} work. Note that lots of
+overloading can give rise to large tuples ``under the hood'' of your
+program.
-HOWEVER: standard instances for tuples (@Eq@, @Ord@, @Ix@, and
-@Binary@) are available {\em only} up to 5-tuples; except @Binary@,
-which we don't do at all.
+HOWEVER: standard instances for tuples (@Eq@, @Ord@, @Bounded@, @Ix@,
+@Read@, and @Show@) are available {\em only} up to 5-tuples.
These limitations are easily subvertible (there is a sketch just below),
so please ask if you get stuck on them.
\end{description}
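For example, the 5-tuple limit just mentioned is easily subverted by
writing the missing instance yourself.  A sketch (purely an
illustration, not part of any Prelude) of an @Eq@ instance for
6-tuples:
\begin{verbatim}
instance (Eq a, Eq b, Eq c, Eq d, Eq e, Eq f) => Eq (a,b,c,d,e,f) where
  (a1,b1,c1,d1,e1,f1) == (a2,b2,c2,d2,e2,f2)
    =  a1 == a2 && b1 == b2 && c1 == c2
    && d1 == d2 && e1 == e2 && f1 == f2
\end{verbatim}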
-
-%************************************************************************
-%* *
-%\subsection[infelicities-Glasgow-exts]{In Glasgow extensions}
-%* *
-%************************************************************************
-
-%\begin{description}
-%-------------------------------------------------------------------
-%\item[Glasgow extensions not well ``packaged'':]
-%We would rather give you tidy interfaces to the primitive extensions
-%that GHC provides. For example, instead of your having to muck around
-%with...
-%\begin{verbatim}
-% ... _ccall_ fflush ``stderr'' `thenIO_Int_#` ...
-%\end{verbatim}
-%... (all very grimy); you should be able to import a \tr{LibC.hi}, and
-%pretend that @fflush@ is really a Haskell function!
-
-%This problem will be fixed when Haskell~1.3 comes into existence, and
-%we implement it.
-
-%-------------------------------------------------------------------
-%\item[@ArrRef@s of @Int#@s, @Float#@s, @Double#@s:]
-%Are not in yet, but will be. (Easy to add if you're desperate.)
-%\end{description}
-
-%************************************************************************
-%* *
-\section[vs-Haskell-1.3]{Haskell~1.3 DRAFT vs.~Glasgow Haskell~0.26}
-\index{GHC vs the DRAFT Haskell 1.3 language}
-\index{Haskell 1.3 language DRAFT vs GHC}
-%* *
-%************************************************************************
-
-There is work afoot on ``Haskell~1.3,'' a substantial revision of
-the Haskell~1.2 language.
-
-Haskell 1.3 is NOT a standard; it is NOT even a DRAFT standard. As of
-June 1995, there exists a 1.3 PROPOSAL, which will CERTAINLY change.
-Therefore, the ``1.3 things'' we ``support'' may change ARBITRARILY
-much, and we won't even be mildly apologetic about breaking programs
-that use ``1.3'' facilities.
-
-That said, there are two categories of ``1.3'' things that we commend
-to you.
-\begin{itemize}
-\item
-Things virtually certain to end up in any 1.3~standard. An example is
-the \tr{Maybe} type.
-\item
-Wobblier things which are so much better than their 1.2 equivalents
-that you will want to use them. We mean: monadic I/O.
-
-The basic I/O functions are ``unlikely'' to change and so are
-reasonably safe to adopt. (But see WARNING above...)
-\end{itemize}
-
-To use our 1.3 code, you should compile {\em and link} using a
-\tr{-fhaskell-1.3}\index{-fhaskell-1.3 option} flag.
-
-%************************************************************************
-%* *
-\subsection[duffer-1-3]{Duffer's guide for converting 1.2 I/O to 1.3 I/O}
-\index{I/O---converting 1.2 to 1.3}
-\index{Dialogue I/O--converting to 1.3}
-\index{1.2 I/O---converting to 1.3}
-%* *
-%************************************************************************
-
-Here is our ``crib sheet'' for converting 1.2 I/O to 1.3. In most cases,
-it's really easy.
-\begin{enumerate}
-\item
-Change \tr{readChan stdin} to \tr{getContents}.
-\item
-Change \tr{appendChan stdout} to \tr{putStr}, which is equivalent to
-\tr{hPutStr stdout}.
-Change \tr{appendChan stderr} to \tr{hPutStr stderr}.
-\item
-You need to \tr{import System} if you used @getArgs@, @getEnv@,
-or @getProgName@.
-\item
-Assuming continuation-style @Dialogue@ code, change \tr{... exit done $}
-to \tr{... >>}. Change \tr{... exit $ \ foo ->} to \tr{... >>= \ foo ->}.
-(There is a small before-and-after example just after this list.)
-\item
-If you had any functions named \tr{(>>)}, \tr{(>>=)}, or \tr{return},
-change them to something else.
-\end{enumerate}
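-
-To make the conversions above concrete, here is a tiny made-up program
-(not taken from any real source; treat it as a sketch only) in
-continuation-style 1.2 I/O:
-\begin{verbatim}
-main :: Dialogue
-main = readChan stdin exit (\ input ->
-       appendChan stdout input exit done)
-\end{verbatim}
-and the same thing after applying the rules above:
-\begin{verbatim}
-main :: IO ()
-main = getContents >>= \ input ->
-       putStr input
-\end{verbatim}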
-
-Also:
-\begin{itemize}
-\item
-1.3 I/O all the way.
-\tr{Dialogue} usually turns into \tr{IO ()}.
-\item
-Use of \tr{StatusFile} request: rewrite (no equivalent exists).
-\item
-Add \tr{import Ratio} if you use \tr{Rationals} at all.
-Ditto: \tr{import Complex} if you use complex numbers.
-Ditto: \tr{import Array} if you use arrays. Also: note that
-Arrays now use ordinary pairs, rather than a separate \tr{Assoc} type.
-May be easier to do:
-infix 1 =:
-(=:) a b = (a,b)
-and switch := to =:
-\item
-This can happen: \tr{LiteralInt.leStringToInt}; add spaces.
-\item
-For \tr{minInt}/\tr{maxInt}, \tr{minChar}/\tr{maxChar} (???)
-use the \tr{Bounded} class methods, \tr{minBound} and \tr{maxBound}.
-\item
-Replace class \tr{Text} with \tr{Show}; on rare occasions,
-you may need to do something for \tr{Read}, too.
-\item
-The functions \tr{ord} and \tr{chr} have been replaced by
-the class methods \tr{fromEnum} and \tr{toEnum}, respectively.
-The changes, however, can lead to ambiguous overloading.
-\item
-Need \tr{import IO} for anything interesting.
-\item
-What was called \tr{handle} is now called \tr{catch}.
-\item
-New keyword: \tr{do}.
-\item
-Other clashes: e.g., \tr{seq}, \tr{fail}.
-\item
-\tr{readDec} no longer exists; use ???.
-\item
-Type of \tr{fail} changed?
-\item
-\tr{(a `op` b) c = ...} is bogus.
-\item
-`failWith x' now `fail x'
-\item
-`fail x' now `fail (userError x)'
-\end{itemize}
-
-%************************************************************************
-%* *
-\subsection[nonio-1-3]{Non-I/O things from the 1.3-DRAFT proposal}
-%* *
-%************************************************************************
-
-Besides the I/O stuff, you also get these things when you use the
-\tr{-fhaskell-1.3}\index{-fhaskell-1.3 option} flag.
-
-Once again: ANY of these things might CHANGE COMPLETELY before we have ``1.3
-for real.''
-
-\begin{verbatim}
-data Either a b = Left a | Right b deriving (Text, Eq, Ord)
-
-data Maybe a = Nothing | Just a deriving (Eq, Ord, Text)
-
-thenMaybe :: Maybe a -> (a -> Maybe b) -> Maybe b
-thenMaybe Nothing _ = Nothing
-thenMaybe (Just x) f = f x
-
-curry :: ((a,b) -> c) -> a -> b -> c
-curry f x y = f (x,y)
-
-uncurry :: (a -> b -> c) -> (a,b) -> c
-uncurry f (x,y) = f x y
-\end{verbatim}
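-
-For instance (a made-up fragment, only to show how \tr{thenMaybe}
-chains possibly-failing computations):
-\begin{verbatim}
-safeDiv :: Int -> Int -> Maybe Int
-safeDiv _ 0 = Nothing
-safeDiv x y = Just (x `div` y)
-
-example :: Maybe Int
-example = safeDiv 100 5 `thenMaybe` \ q -> safeDiv q 2    -- Just 10
-\end{verbatim}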
-\index{Maybe type (Haskell 1.3)}
-\index{Either type (Haskell 1.3)}
-\index{curry function (Haskell 1.3)}
-\index{uncurry function (Haskell 1.3)}
-
-%************************************************************************
-%* *
-\subsection[io-1-3]{Vs~1.3 monadic I/O}
-\index{GHC vs the DRAFT 1.3 I/O proposal}
-\index{DRAFT 1.3 I/O proposal vs GHC}
-%* *
-%************************************************************************
-
-The most notable improvement in Haskell~1.3 is its I/O, with a shift to
-``monadic-style'' I/O.
-
-We still offer direct access to the so-called \tr{PrimIO} monad, via
-the \tr{PreludeGlaST} interface. This is NON-STANDARD, an extension.
-This interface is described in \Sectionref{io-1-3-prim-interface}.
-
-The old \tr{PreludePrimIO} interface is DEAD.
-
-The even-older \tr{PreludeGlaIO} interface is DEADER.
-
-%************************************************************************
-%* *
-\subsubsection[io-1-3-shortcomings]{Known shortcomings in monadic I/O}
-%* *
-%************************************************************************
-
-Before you begin with ``1.3-style'' monadic I/O, you might as well
-know the known shortcomings of our implementation, as at 0.26.
-
-The error type is called \tr{IOError13}, rather than \tr{IOError}
-\index{IOError13 vs IOError}
-(which is still the 1.2 type). (Prelude types cannot be renamed,
-so...) You probably shouldn't be messing with \tr{IOError} much,
-anyway.
-
-Some of the 1.3 I/O code, notably the Extremely Cool \tr{Posix}
-stuff, is relatively untested. Go for it, but be wary...
-\index{Posix library bugs}
-\index{bugs, Posix library}
-
-%************************************************************************
-%* *
-\subsubsection[io-1-3-main-interface]{1.3-style monadic I/O}
-%* *
-%************************************************************************
-
-To use our 1.3 I/O, you should compile {\em and link} using a
-\tr{-fhaskell-1.3}\index{-fhaskell-1.3 option} flag.
-
-You should consult the PROPOSED 1.3-I/O standard. GHC~0.26 implements
-the ``December 1994'' draft, which we distribute in
-\tr{ghc/docs/io-1.3/}.
-
-Alternatively, you could grab the ``June 1995'' draft, from
-\tr{pub/haskell/report/}, on \tr{ftp.dcs.glasgow.ac.uk}. The main
-December--June change that you need to know about is: many of the I/O
-functions have been removed from \tr{Prelude*} interfaces (no import
-required) and put into \tr{Lib*} interfaces (import required).
-
-GHC~0.26 still provides the I/O functions via \tr{Prelude.hi} (no
-import required). Ignore the ``June draft'' pleadings for
-\tr{import IO}, and you'll be fine.
-
-{\em There is no guarantee that the final 1.3 proposal will look
-anything like the current DRAFT.} It ain't a standard until the fat
-committee sings.
-
-For interaction with our non-standard \tr{PrimIO}, including
-\tr{_ccall_}s, we also provide:
-\begin{verbatim}
--- impedance matching stuff
-ioToPrimIO :: IO a -> PrimIO a
-\end{verbatim}
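-
-For instance (a made-up fragment, assuming nothing beyond the signature
-above), a 1.3 \tr{IO} action can be dropped into \tr{PrimIO}-style code
-like so:
-\begin{verbatim}
-putStrViaPrimIO :: String -> PrimIO ()
-putStrViaPrimIO s = ioToPrimIO (putStr s)
-\end{verbatim}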
-
-%************************************************************************
-%* *
-\subsubsection[io-1-3-prim-interface]{Access to the \tr{PrimIO} monad}
-\index{PrimIO monad (Glasgow extension)}
-\index{I/O, primitive (Glasgow extension)}
-%* *
-%************************************************************************
-
-In what we have implemented, \tr{PrimIO} is the
-handle-the-errors-yourself monad (NB: used for C-calls and such);
-whereas \tr{IO} is the 1.3-ish we-handle-errors-for-you monad.
-
-Should you need to play with the \tr{PrimIO} monad directly, you
-can import \tr{PreludeGlaST}.
-
-NB: You used to get this stuff from the \tr{PreludePrimIO} interface,
-which is now deceased. As of 0.26, you get all things
-state-transforming from the \tr{PreludeGlaST} interface.
-
-The usual monadic stuff for \tr{PrimIO}:
-\begin{verbatim}
-returnPrimIO :: a -> PrimIO a
-thenPrimIO :: PrimIO a -> (a -> PrimIO b) -> PrimIO b
-seqPrimIO :: PrimIO a -> PrimIO b -> PrimIO b
-fixPrimIO :: (a -> PrimIO a) -> PrimIO a
-foldrPrimIO :: (a -> b -> PrimIO b) -> PrimIO b -> [a] -> PrimIO b
-listPrimIO :: [PrimIO a] -> PrimIO [a]
-mapPrimIO :: (a -> PrimIO b) -> [a] -> PrimIO [b]
-mapAndUnzipPrimIO :: (a -> PrimIO (b,c)) -> [a] -> PrimIO ([b],[c])
-forkPrimIO :: PrimIO a -> PrimIO a
-
-unsafePerformPrimIO :: PrimIO a -> a
-unsafeInterleavePrimIO :: PrimIO a -> PrimIO a
- -- and they are not called "unsafe" for nothing!
-\end{verbatim}
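-
-By way of illustration only (made-up fragments in the style of the
-\tr{mainPrimIO} example below, assuming \tr{import PreludeGlaST}):
-\begin{verbatim}
-printInt :: Int -> PrimIO ()
-printInt n = _ccall_ printf "%d\n" n
-
-printSum :: Int -> Int -> PrimIO Int
-printSum a b = printInt (a + b) `seqPrimIO` returnPrimIO (a + b)
-\end{verbatim}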
-
-And some other stuff:
-\begin{verbatim}
-data _FILE -- corresponds to a "FILE *" in C
- -- in classes Eq, _CCallable, and _CReturnable
-
-fclose :: _FILE -> PrimIO Int
-fdopen :: Int -> String -> PrimIO _FILE
-fflush :: _FILE -> PrimIO Int
-fopen :: String -> String -> PrimIO _FILE
-fread :: Int -> Int -> _FILE -> PrimIO (Int, _ByteArray Int)
-freopen :: String -> String -> _FILE -> PrimIO _FILE
-fwrite :: _ByteArray Int -> Int -> Int -> _FILE -> PrimIO Int
-
--- please AVOID using these (They will probably die)
-appendChanPrimIO :: String -> String -> PrimIO ()
-appendFilePrimIO :: String -> String -> PrimIO ()
-getArgsPrimIO :: PrimIO [String]
-readChanPrimIO :: String -> PrimIO String
-\end{verbatim}
-
-%************************************************************************
-%* *
-\subsubsection[own-mainPrimIO]{Using your own @mainPrimIO@}
-\index{mainPrimIO, rolling your own}
-%* *
-%************************************************************************
-
-Normally, the GHC runtime system begins things by calling an internal
-function @mainPrimIO :: PrimIO ()@ which, in turn, fires up
-@dialogueToIO :: Dialogue -> IO ()@, linking in {\em your} @Main.main@
-to provide the @Dialogue@.
-
-(If you give a \tr{-fhaskell-1.3} flag, then a {\em different}
-@mainPrimIO@ will be linked in---that's why it is important to link
-with \tr{-fhaskell-1.3}...)
-
-To subvert the above process, you need only provide
-a @mainPrimIO :: PrimIO ()@ of your own
-(in a module named \tr{GHCmain}). Do {\em not} use a \tr{-fhaskell-1.3} flag!
-
-Here's a little example, stolen from Alastair Reid:
-\begin{verbatim}
-module GHCmain ( mainPrimIO ) where
-
-import PreludeGlaST
-
-mainPrimIO :: PrimIO ()
-mainPrimIO =
- sleep 5 `seqPrimIO`
- _ccall_ printf "%d\n" (14::Int)
-
-sleep :: Int -> PrimIO ()
-sleep t = _ccall_ sleep t
-\end{verbatim}
diff --git a/ghc/driver/Jmakefile b/ghc/driver/Jmakefile
index 507055379a..5c7c1097f2 100644
--- a/ghc/driver/Jmakefile
+++ b/ghc/driver/Jmakefile
@@ -1,7 +1,6 @@
/* stuff to have before we get going */
MsubNeededHere(ghc)
UnlitNeededHere(depend)
-InfoStuffNeededHere(docs)
DYN_LOADABLE_BITS = \
ghc-asm.prl \
diff --git a/ghc/driver/ghc-asm.lprl b/ghc/driver/ghc-asm.lprl
index 0907b0945c..eded9b3955 100644
--- a/ghc/driver/ghc-asm.lprl
+++ b/ghc/driver/ghc-asm.lprl
@@ -64,7 +64,7 @@ sub init_TARGET_STUFF {
$T_hsc_cc_PAT = '\.ascii.*\)(hsc|cc) (.*)\\\\11"\n\t\.ascii\s+"(.*)\\\\0"';
$T_DOT_WORD = '\.quad';
- $T_DOT_GLOBAL = "\t\.globl";
+ $T_DOT_GLOBAL = '^\t\.globl';
$T_HDR_literal = "\.rdata\n\t\.align 3\n";
$T_HDR_misc = "\.text\n\t\.align 3\n";
$T_HDR_data = "\.data\n\t\.align 3\n";
@@ -91,7 +91,7 @@ sub init_TARGET_STUFF {
$T_hsc_cc_PAT = '\.STRING.*\)(hsc|cc) (.*)\\\\x09(.*)\\\\x00';
$T_DOT_WORD = '\.word';
- $T_DOT_GLOBAL = '\s+\.EXPORT';
+ $T_DOT_GLOBAL = '^\s+\.EXPORT';
$T_HDR_literal = "\t.SPACE \$TEXT\$\n\t.SUBSPA \$LIT\$\n";
$T_HDR_misc = "\t.SPACE \$TEXT\$\n\t.SUBSPA \$CODE\$\n\t\.align 4\n";
$T_HDR_data = "\t.SPACE \$PRIVATE\$\n\t.SUBSPA \$DATA\$\n\t\.align 4\n";
@@ -121,11 +121,11 @@ sub init_TARGET_STUFF {
$T_hsc_cc_PAT = '\.ascii.*\)(hsc|cc) (.*)\\\\11"\n\t\.ascii\s+"(.*)\\\\0"';
$T_DOT_WORD = '\.long';
$T_DOT_GLOBAL = '\.globl';
- $T_HDR_literal = "\.text\n\t\.align 2\n"; # .align 4 is 486-cache friendly
+ $T_HDR_literal = "\.text\n\t\.align 2\n";
$T_HDR_misc = "\.text\n\t\.align 2,0x90\n";
- $T_HDR_data = "\.data\n\t\.align 2\n"; # ToDo: change align??
+ $T_HDR_data = "\.data\n\t\.align 2\n";
$T_HDR_consist = "\.text\n";
- $T_HDR_closure = "\.data\n\t\.align 2\n"; # ToDo: change align?
+ $T_HDR_closure = "\.data\n\t\.align 2\n";
$T_HDR_info = "\.text\n\t\.align 2\n"; # NB: requires padding
$T_HDR_entry = "\.text\n"; # no .align so we're right next to _info (arguably wrong...?)
$T_HDR_fast = "\.text\n\t\.align 2,0x90\n";
@@ -138,7 +138,8 @@ sub init_TARGET_STUFF {
$T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
$T_US = ''; # _ if symbols have an underscore on the front
$T_DO_GC = 'PerformGC_wrapper';
- $T_PRE_APP = '/'; # regexp that says what comes before APP/NO_APP
+ $T_PRE_APP = # regexp that says what comes before APP/NO_APP
+ ($TargetPlatform =~ /-linux$/) ? '#' : '/' ;
$T_CONST_LBL = '^\.LC(\d+):$'; # regexp for what such a lbl looks like
$T_POST_LBL = ':';
$T_X86_PRE_LLBL_PAT = '\.L';
@@ -172,7 +173,7 @@ sub init_TARGET_STUFF {
$T_CONST_LBL = '^LC(\d+):$';
$T_POST_LBL = ':';
- $T_MOVE_DIRVS = '(\s*(\.align\s+\d+|\.proc\s+\d+|\.const|\.cstring|\.globl\s+\S+|\.text|\.data|\.even|\.stab[^n].*)\n)';
+ $T_MOVE_DIRVS = '^(\s*(\.align\s+\d+|\.proc\s+\d+|\.const|\.cstring|\.globl\s+\S+|\.text|\.data|\.even|\.stab[^n].*)\n)';
$T_COPY_DIRVS = '\.(globl|proc|stab)';
$T_hsc_cc_PAT = '\.ascii.*\)(hsc|cc) (.*)\\\\11"\n\t\.ascii\s+"(.*)\\\\0"';
@@ -199,12 +200,12 @@ sub init_TARGET_STUFF {
$T_CONST_LBL = '^\$LC(\d+):$'; # regexp for what such a lbl looks like
$T_POST_LBL = ':';
- $T_MOVE_DIRVS = '(\s*(\.align\s+\d+|\.(globl|ent)\s+\S+|\.text|\.r?data)\n)';
+ $T_MOVE_DIRVS = '^(\s*(\.align\s+\d+|\.(globl|ent)\s+\S+|\.text|\.r?data)\n)';
$T_COPY_DIRVS = '\.(globl|ent)';
$T_hsc_cc_PAT = 'I WAS TOO LAZY TO DO THIS BIT (WDP 95/05)';
$T_DOT_WORD = '\.word';
- $T_DOT_GLOBAL = '\t\.globl';
+ $T_DOT_GLOBAL = '^\t\.globl';
$T_HDR_literal = "\t\.rdata\n\t\.align 2\n";
$T_HDR_misc = "\t\.text\n\t\.align 2\n";
$T_HDR_data = "\t\.data\n\t\.align 2\n";
@@ -220,28 +221,28 @@ sub init_TARGET_STUFF {
} elsif ( $TargetPlatform =~ /^powerpc-.*/ ) {
$T_STABBY = 0; # 1 iff .stab things (usually if a.out format)
- $T_US = '\.'; # _ if symbols have an underscore on the front
+ $T_US = ''; # _ if symbols have an underscore on the front
$T_DO_GC = 'PerformGC_wrapper';
$T_PRE_APP = 'NOT APPLICABLE'; # regexp that says what comes before APP/NO_APP
$T_CONST_LBL = '^LC\.\.(\d+):$'; # regexp for what such a lbl looks like
$T_POST_LBL = ':';
- $T_MOVE_DIRVS = '^(\s*(\.align\s+\d+(,0x90)?|\.globl\s+\S+|\.text|\.data|\.section\s+.*|\.type\s+.*|\.Lfe.*\n\t\.size\s+.*|\.size\s+.*|\.ident.*)\n)';
- $T_COPY_DIRVS = '\.(globl)';
+ $T_MOVE_DIRVS = '^(\s*(\.toc|.csect \S+|\.l?globl \S+|\.align \d+)\n)';
+ $T_COPY_DIRVS = '\.(l?globl)';
$T_hsc_cc_PAT = '\.string.*\)(hsc|cc) (.*)\\\\t(.*)"';
$T_DOT_WORD = '\.long';
$T_DOT_GLOBAL = '\.globl';
- $T_HDR_literal = "\.section\t\.rodata\n"; # or just use .text??? (WDP 95/11)
- $T_HDR_misc = "\.text\n\t\.align 16\n";
- $T_HDR_data = "\.data\n\t\.align 4\n"; # ToDo: change align??
+ $T_HDR_literal = "\.section\t\.rodata\n";
+ $T_HDR_misc = "\.text\n\t\.align 2\n";
+ $T_HDR_data = "\.data\n\t\.align 2\n";
$T_HDR_consist = "\.text\n";
- $T_HDR_closure = "\.data\n\t\.align 4\n"; # ToDo: change align?
- $T_HDR_info = "\.text\n\t\.align 16\n"; # NB: requires padding
- $T_HDR_entry = "\.text\n"; # no .align so we're right next to _info (arguably wrong...?)
- $T_HDR_fast = "\.text\n\t\.align 16\n";
- $T_HDR_vector = "\.text\n\t\.align 16\n"; # NB: requires padding
- $T_HDR_direct = "\.text\n\t\.align 16\n";
+ $T_HDR_closure = "\.data\n\t\.align 2\n";
+ $T_HDR_info = "\.text\n\t\.align 2\n";
+ $T_HDR_entry = "\.text\n";
+ $T_HDR_fast = "\.text\n\t\.align 2\n";
+ $T_HDR_vector = "\.text\n\t\.align 2\n";
+ $T_HDR_direct = "\.text\n\t\.align 2\n";
#--------------------------------------------------------#
} elsif ( $TargetPlatform =~ /^sparc-.*-solaris2/ ) {
@@ -258,7 +259,7 @@ sub init_TARGET_STUFF {
$T_hsc_cc_PAT = '\.asciz.*\)(hsc|cc) (.*)\\\\t(.*)"';
$T_DOT_WORD = '\.word';
- $T_DOT_GLOBAL = '\.global';
+ $T_DOT_GLOBAL = '^\t\.global';
$T_HDR_literal = "\.text\n\t\.align 8\n";
$T_HDR_misc = "\.text\n\t\.align 4\n";
$T_HDR_data = "\.data\n\t\.align 8\n";
@@ -379,7 +380,9 @@ sub mangle_asm {
next if /^;/ && $TargetPlatform =~ /^hppa/;
- next if /(^$|^\t\.file\t|^ # )/ && $TargetPlatform =~ /^mips-/;
+ next if /(^$|^\t\.file\t|^ # )/ && $TargetPlatform =~ /^(mips|powerpc)-/;
+
+ last if /^_section_\.text:$/ && $TargetPlatform =~ /^powerpc-/;
if ( $TargetPlatform =~ /^mips-/
&& /^\t\.(globl \S+ \.text|comm\t)/ ) {
@@ -505,7 +508,9 @@ sub mangle_asm {
} elsif ( /^${T_US}[A-Za-z0-9_]/o
&& ( $TargetPlatform !~ /^hppa/ # need to avoid local labels in this case
- || /^L\$\d+$/ ) ) {
+ || ! /^L\$\d+$/ )
+ && ( $TargetPlatform !~ /^powerpc/ # ditto
+ || ! /^(L\.\.\d+|LT\.\..*):$/ ) ) {
local($thing);
chop($thing = $_);
print STDERR "Funny global thing?: $_"
@@ -584,13 +589,20 @@ sub mangle_asm {
print STDERR "$Pgm: unknown prologue mangling? $TargetPlatform\n";
}
- die "Prologue junk?: $p\n" if $p =~ /^\t[^\.]/;
+ die "Prologue junk?: $p\n" if $p =~ /^\t[^\.]/
+ && $TargetPlatform !~ /^powerpc-/; #ToDo: remove test
# glue together what's left
$c = $p . $r;
+ $c =~ s/\n\t\n/\n/; # junk blank line
}
}
+ if ( $TargetPlatform =~ /^mips-/ ) {
+ # MIPS: first, this basic sequence may occur before "--- END ---" or not
+ $c =~ s/^\tlw\t\$31,\d+\(\$sp\)\n\taddu\t\$sp,\$sp,\d+\n\tj\t\$31\n\t\.end/\t\.end/;
+ }
+
# toss all epilogue stuff; again, paranoidly
if ( $c =~ /--- END ---/ ) {
if (($r, $e) = split(/--- END ---/, $c)) {
@@ -610,7 +622,8 @@ sub mangle_asm {
} else {
print STDERR "$Pgm: unknown epilogue mangling? $TargetPlatform\n";
}
- die "Epilogue junk?: $e\n" if $e =~ /^\t[^\.]/;
+ die "Epilogue junk?: $e\n" if $e =~ /^\t[^\.]/
+ && $TargetPlatform !~ /^powerpc-/; #ToDo: remove test
# glue together what's left
$c = $r . $e;
@@ -630,7 +643,7 @@ sub mangle_asm {
# On Alphas, the prologue mangling is done a little later (below)
# toss all calls to __DISCARD__
- $c =~ s/^\t(call|jbsr|jal) ${T_US}__DISCARD__\n//go;
+ $c =~ s/^\t(call|jbsr|jal)\s+${T_US}__DISCARD__\n//go;
# MIPS: that may leave some gratuitous asm macros around
# (no harm done; but we get rid of them to be tidier)
@@ -655,7 +668,6 @@ sub mangle_asm {
while ( $c =~ /${T_MOVE_DIRVS}FUNNY#END#THING/o ) {
$to_move = $1;
-
if ( $i < ($numchks - 1)
&& ( $to_move =~ /${T_COPY_DIRVS}/
|| ($TargetPlatform =~ /^hppa/ && $to_move =~ /align/ && $chkcat[$i+1] eq 'literal') )) {
@@ -766,9 +778,13 @@ sub mangle_asm {
$consist =~ s/\//./g;
$consist =~ s/-/_/g;
$consist =~ s/[^A-Za-z0-9_.]/ZZ/g; # ToDo: properly?
- print OUTASM $T_HDR_consist, "${consist}${T_POST_LBL}\n"
- if $TargetPlatform !~ /^mips-/; # we just don't try in that case
- } else {
+ print OUTASM $T_HDR_consist, "${consist}${T_POST_LBL}\n";
+
+ } elsif ( $TargetPlatform !~ /^(mips|powerpc)-/ ) { # we just don't try in those cases (ToDo)
+ # on mips: consistency string is just a very
+ # horrible bunch of .bytes,
+ # which I am too lazy to sort out (WDP 95/05)
+
print STDERR "Couldn't grok consistency: ", $chk[$i];
}
@@ -854,7 +870,7 @@ sub mangle_asm {
if ( ! defined($slowchk{$symb})
# ToDo: the || clause can go once we're no longer
# concerned about producing exactly the same output as before
- || $TargetPlatform =~ /^(m68k|sparc|i386)-/
+#OLD: || $TargetPlatform =~ /^(m68k|sparc|i386)-/
) {
print OUTASM $T_HDR_fast;
}
@@ -894,6 +910,12 @@ sub mangle_asm {
&tidy_up_and_die(1,"$Pgm: unknown chkcat (ghc-asm: $TargetPlatform)\n$chkcat[$i]\n$chk[$i]\n");
}
}
+
+ print OUTASM $EXTERN_DECLS if $TargetPlatform =~ /^mips-/;
+
+ print OUTASM ".csect .text[PR]\n_section_.text:\n.csect .data[RW]\n\t.long _section_.text\n"
+ if $TargetPlatform =~ /^powerpc-/;
+
# finished
close(OUTASM) || &tidy_up_and_die(1,"Failed writing to $out_asmf\n");
close(INASM) || &tidy_up_and_die(1,"Failed reading from $in_asmf\n");
@@ -1088,7 +1110,12 @@ sub print_doctored {
# final peephole fixes
s/^\tmovl \%eax,36\(\%ebx\)\n\tjmp \*36\(\%ebx\)\n/\tmovl \%eax,36\(\%ebx\)\n\tjmp \*\%eax\n/;
- s/^\tmovl \$_(.*),(\%e[abcd]x)\n\tjmp \*$2/\tjmp _$1/g;
+# the short form may tickle perl bug:
+# s/^\tmovl \$${T_US}(.*),(\%e[abcd]x)\n\tjmp \*$2/\tjmp $T_US$1/g;
+ s/^\tmovl \$${T_US}(.*),\%eax\n\tjmp \*\%eax/\tjmp $T_US$1/g;
+ s/^\tmovl \$${T_US}(.*),\%ebx\n\tjmp \*\%ebx/\tjmp $T_US$1/g;
+ s/^\tmovl \$${T_US}(.*),\%ecx\n\tjmp \*\%ecx/\tjmp $T_US$1/g;
+ s/^\tmovl \$${T_US}(.*),\%edx\n\tjmp \*\%edx/\tjmp $T_US$1/g;
# Hacks to eliminate some reloads of Hp. Worth about 5% code size.
# We could do much better than this, but at least it catches about
@@ -1157,6 +1184,8 @@ sub init_FUNNY_THINGS {
"${T_US}_PRMarking_MarkNextGA${T_POST_LBL}", 1,
"${T_US}_PRMarking_MarkNextRoot${T_POST_LBL}", 1,
"${T_US}_PRMarking_MarkNextSpark${T_POST_LBL}", 1,
+ "${T_US}_PRMarking_MarkNextEvent${T_POST_LBL}", 1,
+ "${T_US}_PRMarking_MarkNextClosureInFetchBuffer${T_POST_LBL}", 1,
"${T_US}_Scavenge_Forward_Ref${T_POST_LBL}", 1,
"${T_US}__std_entry_error__${T_POST_LBL}", 1,
"${T_US}_startMarkWorld${T_POST_LBL}", 1,
@@ -1192,7 +1221,7 @@ sub rev_tbl {
for ($i = 0; $i <= $#lines && $lines[$i] !~ /^\t${T_DOT_WORD}\s+/o; $i++) {
$label .= $lines[$i] . "\n",
next if $lines[$i] =~ /^[A-Za-z0-9_]+_info${T_POST_LBL}$/o
- || $lines[$i] =~ /^${T_DOT_GLOBAL}/o
+ || $lines[$i] =~ /${T_DOT_GLOBAL}/o
|| $lines[$i] =~ /^${T_US}vtbl_\S+${T_POST_LBL}$/o;
$before .= $lines[$i] . "\n"; # otherwise...
diff --git a/ghc/driver/ghc-iface.lprl b/ghc/driver/ghc-iface.lprl
index 5f0fe311b1..6d3bde1777 100644
--- a/ghc/driver/ghc-iface.lprl
+++ b/ghc/driver/ghc-iface.lprl
@@ -20,8 +20,21 @@ sub postprocessHiFile {
# run diff if they asked for it
if ($HiDiff_flag && ! $HiOnStdout && ! $going_interactive && -f $hifile_target) {
- &run_something("$Cmp -s $hifile_target $new_hi || $Diff $hifile_target $new_hi 1>&2 || exit 0",
- "Diff'ing old and new .$HiSuffix files"); # NB: to stderr
+ if ( $HiDiff_flag eq 'usages' ) {
+ # lots of near-useless info; but if you want it...
+ &run_something("$Cmp -s $hifile_target $new_hi || $Diff $hifile_target $new_hi 1>&2 || exit 0",
+ "Diff'ing old and new .$HiSuffix files"); # NB: to stderr
+ } else {
+ # strip out usages, *then* run diff
+ local($hi_before) = "$Tmp_prefix.hi-before";
+ local($hi_after) = "$Tmp_prefix.hi-now";
+
+ &deUsagifyHi($hifile_target, $hi_before);
+ &deUsagifyHi($new_hi, $hi_after);
+
+ &run_something("$Cmp -s $hi_before $hi_after || $Diff $hi_before $hi_after 1>&2 || exit 0",
+ "Diff'ing old and new .$HiSuffix files"); # NB: to stderr
+ }
}
# if we produced an interface file "no matter what",
@@ -33,6 +46,34 @@ sub postprocessHiFile {
"Replace .$HiSuffix file, if changed");
}
}
+
+sub deUsagifyHi {
+ local($ifile,$ofile) = @_;
+
+ open(OLDHIF, "< $ifile") || &tidy_up_and_die(1,"Can't open $ifile (read)\n");
+ open(NEWHIF, "> $ofile") || &tidy_up_and_die(1,"Can't open $ofile (write)\n");
+
+ # read up to __usages__ line
+ $_ = <OLDHIF>;
+ while ($_ ne '' && ! /^__usages__/) {
+ print NEWHIF $_ unless /^(interface |\{-# GHC_PRAGMA)/;
+ $_ = <OLDHIF>;
+ }
+ if ( $_ ne '' ) {
+ # skip to next __<anything> line
+ $_ = <OLDHIF>;
+ while ($_ ne '' && ! /^__/) { $_ = <OLDHIF>; }
+
+ # print the rest
+ while ($_ ne '') {
+ print NEWHIF $_;
+ $_ = <OLDHIF>;
+ }
+ }
+
+ close(OLDHIF) || &tidy_up_and_die(1,"Failed reading from $ifile\n");
+ close(NEWHIF) || &tidy_up_and_die(1,"Failed writing to $ofile\n");
+}
\end{code}
\begin{code}
@@ -135,6 +176,7 @@ sub readHiFile {
local($now_in) = '';
hi_line: while (<HIFILE>) {
next if /^ *$/; # blank line
+ next if /\{-# GHC_PRAGMA INTERFACE VERSION 20 #-\}/;
# avoid pre-1.3 interfaces
#print STDERR "now_in:$now_in:$_";
@@ -237,7 +279,8 @@ sub printNewItemVersion {
local($item, $mod_version) = @_;
if (! defined($Decl{"new:$item"}) ) {
- print STDERR "$item: no decl?! (nothing into __versions__)\n";
+# it's OK, because the thing is almost-certainly wired-in
+# print STDERR "$item: no decl?! (nothing into __versions__)\n";
return;
}
diff --git a/ghc/driver/ghc-recomp.lprl b/ghc/driver/ghc-recomp.lprl
index 3414605e8d..39efdb9780 100644
--- a/ghc/driver/ghc-recomp.lprl
+++ b/ghc/driver/ghc-recomp.lprl
@@ -33,7 +33,7 @@ sub runRecompChkr {
$hi_atime,$hi_mtime,$hi_ctime,$hi_blksize,$hi_blocks) = stat(_); # stat info from -f test
if ($i_mtime > $o_mtime) {
- print STDERR "$Pgm:recompile:Input file $ifile newer than $ofile_target ($i_mtime > $o_mtime)\n";
+ print STDERR "$Pgm:recompile:Input file $ifile newer than $ofile_target\n";
return(1);
}
@@ -121,10 +121,6 @@ print STDERR "considering used entities...\n";
}
return 0 if ! $used_entities_have_changed;
- print STDERR "ifile $ifile:\t$i_mtime\n";
- print STDERR "ofile $ofile_target:\t$o_mtime\n";
- print STDERR "hifile $hifile_target:\t$hi_mtime\n";
-
return(1); # OK, *recompile*
}
\end{code}
diff --git a/ghc/driver/ghc.lprl b/ghc/driver/ghc.lprl
index 94b1eda4ef..3777be9a5b 100644
--- a/ghc/driver/ghc.lprl
+++ b/ghc/driver/ghc.lprl
@@ -31,12 +31,12 @@ possible phases of a compilation:
- hscpp: run code through the C pre-processor (if -cpp flag given)
- hsc: run the Haskell compiler proper
- gcc: run the C compiler (if compiling via C)
- - as: run the Unix assembler
- - ld: run the Unix linker
+ - as: run the assembler
+ - ld: run the linker
For each input file, the phase to START with is determined by the
file's suffix:
- - .lhs literate Haskell: lit2pgm
+ - .lhs literate Haskell: unlit
- .hs illiterate Haskell: hsc
- .hc C from the Haskell compiler: gcc
- .c C not from the Haskell compiler: gcc
@@ -208,11 +208,11 @@ $Oopt_MonadEtaExpansion = '';
$Oopt_FinalStgProfilingMassage = '';
$Oopt_StgStats = '';
$Oopt_SpecialiseUnboxed = '';
-$Oopt_DoSpecialise = '-fspecialise';
-$Oopt_FoldrBuild = 1; # On by default!
-$Oopt_FB_Support = '-fdo-arity-expand';
+$Oopt_DoSpecialise = ''; # ToDo:LATER: '-fspecialise';
+$Oopt_FoldrBuild = 0; # *Off* by default!
+$Oopt_FB_Support = ''; # was '-fdo-arity-expand';
#$Oopt_FoldrBuildWW = 0; # Off by default
-$Oopt_FoldrBuildInline = '-fdo-inline-foldr-build';
+$Oopt_FoldrBuildInline = ''; # was '-fdo-inline-foldr-build';
\end{code}
Things to do with C compilers/etc:
@@ -406,7 +406,8 @@ require special handling.
@SysImport_dir = ( $(INSTALLING) )
? ( "$InstDataDirGhc/imports" )
: ( "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)/prelude"
- , "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)/required" );
+ , "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)/required"
+ , "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)/concurrent" );
$GhcVersionInfo = int ($(PROJECTVERSION) * 100);
$Haskell1Version = 3; # i.e., Haskell 1.3
@@ -417,11 +418,13 @@ $Haskell1Version = 3; # i.e., Haskell 1.3
@SysLibrary_dir = ( ( $(INSTALLING) ) #-syslib things supplied by the system
? $InstLibDirGhc
- : ("$TopPwd/$(CURRENT_DIR)/$(GHC_RUNTIMESRC)",
- "$TopPwd/$(CURRENT_DIR)/$(GHC_RUNTIMESRC)/gmp",
- "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)")
+ : ( "$TopPwd/$(CURRENT_DIR)/$(GHC_RUNTIMESRC)"
+ , "$TopPwd/$(CURRENT_DIR)/$(GHC_RUNTIMESRC)/gmp"
+ , "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)"
+ , "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)/cbits"
+ )
);
-@SysLibrary = ( '-lHS' ); # basic I/O and prelude stuff
+@SysLibrary = (); # will be built up as we go along
$TopClosureFile # defaults to 1.2 one; will be mangled later
= ( $(INSTALLING) ) ? "$InstLibDirGhc/TopClosureXXXX.o"
@@ -445,10 +448,10 @@ $PostprocessCcOutput = 0;
$HaveNativeCodeGen = $(GHC_WITH_NATIVE_CODEGEN);
$HscOut = '-C='; # '-C=' ==> .hc output; '-S=' ==> .s output; '-N=' ==> neither
$HscOut = '-S='
- if $HaveNativeCodeGen && $TargetPlatform =~ /^(alpha|sparc|i386)-/;
+ if $HaveNativeCodeGen && $TargetPlatform =~ /^(alpha|sparc)-/; #ToDo: add |i386 !
$ProduceHi = '-hifile=';
$HiOnStdout = 0;
-$HiDiff_flag = 0;
+$HiDiff_flag = '';
$CollectingGCstats = 0;
$CollectGhcTimings = 0;
@@ -469,6 +472,7 @@ $Specific_output_file = ''; # set by -o <file>; "-" for stdout
$Specific_hi_file = ''; # set by -ohi <file>; "-" for stdout
$Specific_dump_file = ''; # set by -odump <file>; "-" for stdout
$Using_dump_file = 0;
+$Isuffix = '';
$Osuffix = ''; # default: use the normal suffix for that kind of output
$HiSuffix = 'hi';
$SysHiSuffix= 'hi';
@@ -568,7 +572,7 @@ arg: while($_ = $ARGV[0]) {
/^-v$/ && do { $Verbose = '-v'; $Time = 'time'; next arg; };
#---------- what phases are to be run ----------------------------------
- /^-short$/ && do { $Do_recomp_chkr = 1; next arg; };
+ /^-recomp/ && do { $Do_recomp_chkr = 1; next arg; };
/^-cpp$/ && do { $Cpp_flag_set = 1; next arg; };
# change the global default:
@@ -589,8 +593,9 @@ arg: while($_ = $ARGV[0]) {
/^-nohi$/ && do { $ProduceHi = '-nohifile='; next arg; };
# don't generate an interface (even if generating C)
- /^-hi-diffs$/ && do { $HiDiff_flag = 1; next arg; };
- /^-no-hi-diffs$/ && do { $HiDiff_flag = 0; next arg; };
+ /^-hi-diffs$/ && do { $HiDiff_flag = 'normal'; next arg; };
+ /^-hi-diffs-with-usages$/ && do { $HiDiff_flag = 'usages'; next arg; };
+ /^-no-hi-diffs$/ && do { $HiDiff_flag = ''; next arg; };
# show/disable diffs if the interface file changes
/^-E$/ && do { push(@CcBoth_flags, '-E');
@@ -638,6 +643,14 @@ arg: while($_ = $ARGV[0]) {
}
next arg; };
+ # NB: -isuf not documented yet (because it doesn't work yet)
+ /^-isuf$/ && do { $Isuffix = &grab_arg_arg('-isuf', '');
+ if ($Isuffix =~ /\./ ) {
+ print STDERR "$Pgm: -isuf suffix shouldn't contain a .\n";
+ $Status++;
+ }
+ next arg; };
+
/^-osuf$/ && do { $Osuffix = &grab_arg_arg('-osuf', '');
if ($Osuffix =~ /\./ ) {
print STDERR "$Pgm: -osuf suffix shouldn't contain a .\n";
@@ -807,7 +820,14 @@ arg: while($_ = $ARGV[0]) {
? "$InstSysLibDir/$syslib/imports"
: "$TopPwd/hslibs/$syslib/src");
- unshift(@SysLibrary, ('-lHS' . $syslib ));
+ if (! $(INSTALLING)) {
+ push(@SysLibrary_dir,
+ ("$TopPwd/hslibs/$syslib"
+ ,"$TopPwd/hslibs/$syslib/cbits"));
+ }
+
+ push(@SysLibrary, ("-lHS$syslib"
+ ,"-lHS${syslib}_cbits"));
next arg; };
@@ -859,10 +879,12 @@ arg: while($_ = $ARGV[0]) {
/^-fno-implicit-prelude$/ && do { $NoImplicitPrelude= 1; push(@HsC_flags, $_); next arg; };
# ToDo: rename to -fcompiling-ghc-internals=<module>
+ # NB: not documented
/^-fcompiling-ghc-internals(.*)/ && do { local($m) = &grab_arg_arg('-fcompiling-ghc-internals',$1);
push(@HsC_flags, "-fcompiling-ghc-internals=$m");
next arg; };
+ # NB: not really put to use and not documented
/^-fusing-ghc-internals$/ && do { $UsingGhcInternals = 1; next arg; };
/^-user-prelude-force/ && do { # ignore if not -user-prelude
@@ -887,21 +909,12 @@ arg: while($_ = $ARGV[0]) {
}
next arg; };
- /^-f(hide-builtin-names|min-builtin-names)$/
- && do { push(@HsC_flags, $_);
-# push(@HsC_flags, '-fno-implicit-prelude'); # don't read Prelude.hi
-# push(@HsP_flags, '-N'); # allow foo# names
- next arg; };
/^-fglasgow-exts$/
&& do { push(@HsC_flags, $_);
push(@HsP_flags, '-N');
# push(@HsC_flags, '-fshow-import-specs');
-# if ( ! $(INSTALLING) ) {
-# unshift(@SysImport_dir,
-# "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)/glaExts");
-# }
next arg; };
/^-fspeciali[sz]e-unboxed$/
@@ -940,10 +953,6 @@ arg: while($_ = $ARGV[0]) {
#print "No Foldback of append\n";
next arg; };
-# /^-ffoldr-build-ww$/
-# && do { $Oopt_FoldrBuildWW = 1; next arg; };
-
-
# ---------------
/^-fasm-(.*)$/ && do { $HscOut = '-S='; next arg; }; # force using nativeGen
@@ -986,7 +995,8 @@ arg: while($_ = $ARGV[0]) {
&squashHscFlag("-f$1");
next arg; };
- /^-f/ && do { push(@HsC_flags, $_); next arg; };
+ /^-f(show-import-specs)/
+ && do { push(@HsC_flags, $_); next arg; };
# ---------------
@@ -1034,9 +1044,9 @@ arg: while($_ = $ARGV[0]) {
if ($heap_size <= 0) {
print STDERR "$Pgm: resetting heap-size to zero!!!\n";
$Specific_heap_size = 0;
- }
+
# if several heap sizes given, take the largest...
- if ($heap_size >= $Specific_heap_size) {
+ } elsif ($heap_size >= $Specific_heap_size) {
$Specific_heap_size = $heap_size;
} else {
print STDERR "$Pgm: ignoring heap-size-setting option ($_)...not the largest seen\n";
@@ -1055,9 +1065,9 @@ arg: while($_ = $ARGV[0]) {
if ($stk_size <= 0) {
print STDERR "$Pgm: resetting stack-size to zero!!!\n";
$Specific_stk_size = 0;
- }
+
# if several stack sizes given, take the largest...
- if ($stk_size >= $Specific_stk_size) {
+ } elsif ($stk_size >= $Specific_stk_size) {
$Specific_stk_size = $stk_size;
} else {
print STDERR "$Pgm: ignoring stack-size-setting option (-Rmax-stksize $stk_size)...not the largest seen\n";
@@ -1300,7 +1310,6 @@ It really really wants to be the last STG-to-STG pass that is run.
'\)',
$Oopt_AddAutoSccs,
# '-ffull-laziness', # removed 95/04 WDP following Andr\'e's lead
- '-fuse-get-mentioned-vars', # for the renamer
$Oopt_FinalStgProfilingMassage
);
@@ -1331,6 +1340,7 @@ It really really wants to be the last STG-to-STG pass that is run.
'-ffloat-primops-ok',
'-fcase-of-case',
'-fdo-case-elim',
+ '-fcase-merge',
'-fdo-eta-reduction',
'-fdo-lambda-eta-expansion',
'-freuse-con',
@@ -1340,7 +1350,7 @@ It really really wants to be the last STG-to-STG pass that is run.
$Oopt_MaxSimplifierIterations,
'\)',
- '-fcalc-inlinings1',
+#LATER: '-fcalc-inlinings1', -- pointless for 2.01
# ($Oopt_FoldrBuildWW) ? (
# '-ffoldr-build-ww-anal',
@@ -1352,6 +1362,7 @@ It really really wants to be the last STG-to-STG pass that is run.
# '-ffloat-primops-ok',
# '-fcase-of-case',
# '-fdo-case-elim',
+# '-fcase-merge',
# '-fdo-eta-reduction',
# '-fdo-lambda-eta-expansion',
# '-freuse-con',
@@ -1378,6 +1389,7 @@ It really really wants to be the last STG-to-STG pass that is run.
'-ffloat-primops-ok',
'-fcase-of-case',
'-fdo-case-elim',
+ '-fcase-merge',
'-fdo-eta-reduction',
'-fdo-lambda-eta-expansion',
'-freuse-con',
@@ -1397,6 +1409,7 @@ It really really wants to be the last STG-to-STG pass that is run.
'-ffloat-primops-ok',
'-fcase-of-case',
'-fdo-case-elim',
+ '-fcase-merge',
'-fdo-eta-reduction',
'-fdo-lambda-eta-expansion',
'-freuse-con',
@@ -1419,6 +1432,7 @@ It really really wants to be the last STG-to-STG pass that is run.
'-ffloat-primops-ok',
'-fcase-of-case',
'-fdo-case-elim',
+ '-fcase-merge',
'-fdo-eta-reduction',
'-fdo-lambda-eta-expansion',
'-freuse-con',
@@ -1436,7 +1450,7 @@ It really really wants to be the last STG-to-STG pass that is run.
# ( ($OptLevel != 2)
# ? ''
-# : "-fliberate-case -fsimplify \\( $Oopt_FB_Support -ffloat-lets-exposing-whnf -ffloat-primops-ok -fcase-of-case -fdo-case-elim -fdo-eta-reduction -fdo-lambda-eta-expansion -freuse-con -flet-to-case $Oopt_PedanticBottoms $Oopt_MonadEtaExpansion $Oopt_UnfoldingUseThreshold $Oopt_MaxSimplifierIterations \\)" ),
+# : "-fliberate-case -fsimplify \\( $Oopt_FB_Support -ffloat-lets-exposing-whnf -ffloat-primops-ok -fcase-of-case -fdo-case-elim -fcase-merge -fdo-eta-reduction -fdo-lambda-eta-expansion -freuse-con -flet-to-case $Oopt_PedanticBottoms $Oopt_MonadEtaExpansion $Oopt_UnfoldingUseThreshold $Oopt_MaxSimplifierIterations \\)" ),
# Final clean-up simplification:
@@ -1447,6 +1461,7 @@ It really really wants to be the last STG-to-STG pass that is run.
'-ffloat-primops-ok',
'-fcase-of-case',
'-fdo-case-elim',
+ '-fcase-merge',
'-fdo-eta-reduction',
'-fdo-lambda-eta-expansion',
'-freuse-con',
@@ -1463,10 +1478,10 @@ It really really wants to be the last STG-to-STG pass that is run.
# '-fstatic-args',
- '-fcalc-inlinings2',
+#LATER: '-fcalc-inlinings2', -- pointless for 2.01
# stg2stg passes
- '-fupdate-analysis',
+#LATER: '-fupdate-analysis',
'-flambda-lift',
$Oopt_FinalStgProfilingMassage,
$Oopt_StgStats,
@@ -1653,6 +1668,8 @@ if ($TargetPlatform =~ /^alpha-/) {
# we know how to *mangle* asm for PowerPC
unshift(@CcRegd_flags, ('-D__STG_REV_TBLS__'));
unshift(@CcRegd_flags, ('-DSTACK_CHECK_BY_PAGE_FAULT=1')) if $StkChkByPageFaultOK;
+ unshift(@CcBoth_flags, ('-static')); # always easier to start with
+ unshift(@CcRegd_flags, ('-finhibit-size-directive')); # avoids traceback tables
} elsif ($TargetPlatform =~ /^sparc-/) {
# we know how to *mangle* asm for SPARC
@@ -1712,8 +1729,11 @@ if ( ! $(INSTALLING) ) {
\end{code}
\begin{code}
+push(@SysLibrary, ( '-lHS', '-lHS_cbits' )); # basic I/O and prelude stuff
+
local($f);
foreach $f (@SysLibrary) {
+ next if $f =~ /_cbits/;
$f .= $BuildTag if $f =~ /^-lHS/;
}
@@ -1915,9 +1935,11 @@ args: while ($a = shift(@ARGV)) {
}
}
-exec "$SysMan $debug $pvm_executable $nprocessors @nonPVM_args";
-print STDERR "Exec failed!!!: $SysMan $debug $nprocessors @nonPVM_args\n";
-exit(1);
+local($return_val) = 0;
+system("$SysMan $debug $pvm_executable $nprocessors @nonPVM_args");
+$return_val = $?;
+system("mv $ENV{'HOME'}/$pvm_executable_base.???.gr .") if -f "$ENV{'HOME'}/$pvm_executable_base.001.gr";
+exit($return_val);
EOSCRIPT2
close(EXEC) || die "Failed closing $executable\n";
chmod 0755, $executable;
@@ -2005,7 +2027,7 @@ Again, we'll do the post-recompilation-checker parts of this later.
} elsif ($ifile =~ /\.hs$/) {
$do_lit2pgm = 0;
$lit2pgm_hscpp = $ifile;
- } elsif ($ifile =~ /\.hc$/) {
+ } elsif ($ifile =~ /\.hc$/ || $ifile =~ /_hc$/ ) { # || $ifile =~ /\.$Isuffix$/o) # ToDo: better
$do_lit2pgm = 0; $do_hscpp = 0; $do_hsc = 0; $do_cc = 1;
$hsc_out = $ifile;
} elsif ($ifile =~ /\.c$/) {
@@ -2066,7 +2088,11 @@ Check if hsc needs to be run at all.
$more_processing_required
= &runRecompChkr($ifile, $hscpp_hsc, $ifile_root, $ofile_target, $hifile_target);
- print STDERR "$Pgm:recompile: NOT NEEDED!\n" if ! $more_processing_required;
+ if ( ! $more_processing_required ) {
+ print STDERR "$Pgm:recompile: NOT NEEDED!\n"; # Yay!
+ # propagate dependency:
+ &run_something("touch $ofile_target", "Touch $ofile_target, to propagate dependencies");
+ }
}
$do_hsc = 0, $do_cc = 0, $do_as = 0 if ! $more_processing_required;
@@ -2117,7 +2143,9 @@ Finally, decide what to queue up for linker input.
# tentatively assume we will eventually produce linker input:
push(@Link_file, &odir_ify($ifile_root, 'o'));
- if ($ifile !~ /\.(lhs|hs|hc|c|s)$/) {
+#ToDo: local($or_isuf) = ($Isuffix eq '') ? '' : "|$Isuffix";
+
+ if ( $ifile !~ /\.(lhs|hs|hc|c|s)$/ && $ifile !~ /_hc$/ ) {
print STDERR "$Pgm: don't recognise suffix on `$ifile'; passing it through to linker\n"
if $ifile !~ /\.a$/;
@@ -2311,8 +2339,8 @@ sub already_mapped_err {
return if $m_ino == $p_ino; # same inode number
- print STDERR "$Pgm: module $mod already mapped to $mapped_to (inode $m_ino)";
- print STDERR ";\n\tignoring: $path (inode $p_ino)\n";
+ print STDERR "$Pgm: module $mod already mapped to $mapped_to";
+ print STDERR ";\n\tignoring: $path\n";
}
\end{code}
@@ -2419,23 +2447,23 @@ sub runMangler {
&mangle_asm($cc_as_o, $cc_as);
#OLD: for sanity:
- local($target) = 'oops';
- $target = '-alpha' if $TargetPlatform =~ /^alpha-/;
- $target = '-hppa' if $TargetPlatform =~ /^hppa/;
- $target = '' if $TargetPlatform =~ /^i386-/;
- $target = '-m68k' if $TargetPlatform =~ /^m68k-/;
- $target = '-mips' if $TargetPlatform =~ /^mips-/;
- $target = '' if $TargetPlatform =~ /^powerpc-/;
- $target = '-solaris' if $TargetPlatform =~ /^sparc-sun-solaris2/;
- $target = '-sparc' if $TargetPlatform =~ /^sparc-sun-sunos4/;
-
- if ( $target ne '' ) {
- require("ghc-asm$target.prl")
- || &tidy_up_and_die(1,"$Pgm: panic: can't load ghc-asm$target.prl!\n");
- &mangle_asm($cc_as_o, "$cc_as-2"); # the OLD one!
- &run_something("$Cmp -s $cc_as-2 $cc_as || $Diff $cc_as-2 $cc_as 1>&2 || exit 0",
- "Diff'ing old and new mangled .s files"); # NB: to stderr
- }
+#OLD: local($target) = 'oops';
+#OLD: $target = '-alpha' if $TargetPlatform =~ /^alpha-/;
+#OLD: $target = '-hppa' if $TargetPlatform =~ /^hppa/;
+#OLD: $target = '-old-asm' if $TargetPlatform =~ /^i386-/;
+#OLD: $target = '-m68k' if $TargetPlatform =~ /^m68k-/;
+#OLD: $target = '-mips' if $TargetPlatform =~ /^mips-/;
+#OLD: $target = '' if $TargetPlatform =~ /^powerpc-/;
+#OLD: $target = '-solaris' if $TargetPlatform =~ /^sparc-sun-solaris2/;
+#OLD: $target = '-sparc' if $TargetPlatform =~ /^sparc-sun-sunos4/;
+#OLD:
+#OLD: if ( $target ne '' ) {
+#OLD: require("ghc-asm$target.prl")
+#OLD: || &tidy_up_and_die(1,"$Pgm: panic: can't load ghc-asm$target.prl!\n");
+#OLD: &mangle_asm($cc_as_o, "$cc_as-2"); # the OLD one!
+#OLD: &run_something("$Cmp -s $cc_as-2 $cc_as || $Diff $cc_as-2 $cc_as 1>&2 || exit 0",
+#OLD: "Diff'ing old and new mangled .s files"); # NB: to stderr
+#OLD: }
} elsif ($TargetPlatform =~ /^hppa/) {
# minor mangling of non-threaded files for hp-pa only
@@ -2659,7 +2687,7 @@ next argument (\tr{-Rmax-heapsize 8M}). We allow both cases.
Note: no error-checking; \tr{-Rmax-heapsize -Rgc-stats} will silently
gobble the second argument (and probably set the heapsize to something
-nonsensical). (ToDo?)
+nonsensical).
\begin{code}
sub grab_arg_arg {
local($option, $rest_of_arg) = @_;
diff --git a/ghc/driver/test_mangler b/ghc/driver/test_mangler
index 5cfe057770..f24f0e4bc0 100644
--- a/ghc/driver/test_mangler
+++ b/ghc/driver/test_mangler
@@ -2,12 +2,12 @@
# a simple wrapper to test a .s-file mangler
# reads stdin, writes stdout
-$which = $ARGV[0]; shift; # nice error checking, Will
+$TargetPlatform = $ARGV[0]; shift; # nice error checking, Will
-require("ghc-asm-$which.prl") || die "require mangler failed!\n";
+require("ghc-asm.prl") || die "require mangler failed!\n";
$SpX86Mangling = 1;
-$StolenX86Regs = 5;
+$StolenX86Regs = 4;
open(INP, "> /tmp/mangle1.$$") || die "Can't open tmp file 1\n";
while (<>) {
diff --git a/ghc/includes/COptRegs.lh b/ghc/includes/COptRegs.lh
index 866d11ab37..eaf46f94ae 100644
--- a/ghc/includes/COptRegs.lh
+++ b/ghc/includes/COptRegs.lh
@@ -266,8 +266,8 @@ is used by most of the current garbage collectors.
Mark (GC) register mapping:
\begin{verbatim}
- sparc m68k alpha mipseX hppa iX86 rs6000
- ----- ---- ----- ------ ---- ---- ------
+ sparc m68k alpha mipseX hppa iX86 powerpc
+ ----- ---- ----- ------ ---- ---- -------
MarkBase ebx
Mark i0 a2 $9 $16 r4 ebp
@@ -427,8 +427,8 @@ reside in global registers, because the scan code is not threaded.
Scan (GC) register mapping:
\begin{verbatim}
- sparc m68k alpha mipseX hppa iX86 rs6000
- ----- ---- ----- ------ ---- ---- ------
+ sparc m68k alpha mipseX hppa iX86 powerpc
+ ----- ---- ----- ------ ---- ---- -------
ScanBase g4
Scan a2 $9 $16 r4 ebx
@@ -558,8 +558,8 @@ etc. or there'll be trouble. ADR
Scavenge (GC) register mapping:
\begin{verbatim}
- sparc m68k alpha mipseX hppa iX86 rs6000
- ----- ---- ----- ------ ---- ---- ------
+ sparc m68k alpha mipseX hppa iX86 powerpc
+ ----- ---- ----- ------ ---- ---- -------
ScavBase g4
Scav a2 $9 $16 r4 ebx
@@ -734,8 +734,8 @@ IF YOU CHANGE THIS TABLE, YOU MAY NEED TO CHANGE CallWrapper.s
documentation in the porter's part of the installation guide.
\begin{verbatim}
- sparc m68k alpha mipseX hppa iX86 rs6000
- ----- ---- ----- ------ ---- ---- ------
+ sparc m68k alpha mipseX hppa iX86 powerpc
+ ----- ---- ----- ------ ---- ---- -------
BaseReg# a5 ebx
StkOReg (CONCURRENT)
diff --git a/ghc/includes/COptWraps.lh b/ghc/includes/COptWraps.lh
index 4334caec34..baf217b663 100644
--- a/ghc/includes/COptWraps.lh
+++ b/ghc/includes/COptWraps.lh
@@ -613,11 +613,10 @@ gets whatever it's after.
"\tstd %i2,[%fp-32]\n" \
"\tstd %i4,[%fp-24]");
-/* We leave nothing to chance here; we have seen
- GCC stick "unwanted" code in the branch delay
- slot, causing mischief (WDP 96/05)
+/* Lest GCC attempt to stick something in
+ the delay slot: we compile with
+ -fno-delayed-branch. A weak solution. WDP 96/07
*/
-#ifdef GRAN
#define MAGIC_CALL \
__asm__ volatile ( \
"ld [%%fp-40],%%o5\n" \
@@ -631,7 +630,16 @@ gets whatever it's after.
__asm__ volatile ( \
"std %f0,[%fp-40]\n" \
"\tstd %o0,[%fp-32]");
-#else
+#if 0
+/* We leave nothing to chance here; we have seen
+ GCC stick "unwanted" code in the branch delay
+ slot, causing mischief (WDP 96/05)
+*/
+/* the problem with this one: GCC has no way of
+ knowing there is a "call" in there, so it
+ does not do any calling-convention stuff
+ (e.g., saving used regs). WDP 96/07
+*/
#define MAGIC_CALL \
__asm__ volatile ( \
"ld [%%fp-40],%%o5\n" \
@@ -644,8 +652,8 @@ gets whatever it's after.
"\tnop\n" \
"\tstd %%f0,[%%fp-40]\n"\
"\tstd %%o0,[%%fp-32]" \
- : : : "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%f0", "%g1", "%g2", "%g3", "%g4", "memory");
-#endif
+ : : : "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o7", "%f0", "memory");
+#endif /* 0 */
#define MAGIC_RETURN \
__asm__ volatile ( \
diff --git a/ghc/includes/CostCentre.lh b/ghc/includes/CostCentre.lh
index 06c1e84b86..a63390751d 100644
--- a/ghc/includes/CostCentre.lh
+++ b/ghc/includes/CostCentre.lh
@@ -47,7 +47,7 @@ source using the @CC_DECLARE@ macro where @label@, @module@ and
\begin{code}
# define CC_IS_CAF 'c'
# define CC_IS_DICT 'd'
-# define CC_IS_SUBSUMED 'S'
+# define CC_IS_SUBSUMED 's'
# define CC_IS_BORING 'B'
# define STATIC_CC_REF(cc_ident) &CAT2(cc_ident,_struct)
@@ -156,11 +156,9 @@ CC_EXTERN(CC_DONTZuCARE); /* placeholder only */
CC_EXTERN(CC_CAFs); /* prelude cost centre (CAFs only) */
CC_EXTERN(CC_DICTs); /* prelude cost centre (DICTs only) */
-# define IS_CAF_OR_DICT_CC(cc) \
+# define IS_CAF_OR_DICT_OR_SUB_CC(cc) \
((cc)->is_subsumed & ' ') /* tests for lower case character */
-# define IS_SUBSUMED_CC(cc) ((cc)->is_subsumed == CC_IS_SUBSUMED)
-
\end{code}
Definitions referring to the Cost Centre sub-field of the fixed header.
@@ -268,7 +266,7 @@ On entering a closure we only count the enter to thunks ...
do { \
CostCentre cc = (CostCentre) (centre); \
ASSERT_IS_REGISTERED(cc,1); \
- if ( ! IS_CAF_OR_DICT_CC(cc) ) { \
+ if ( ! IS_CAF_OR_DICT_OR_SUB_CC(cc) ) { \
CCC = cc; \
} else { \
CCC_DETAIL_COUNT(cc->caffun_subsumed); \
@@ -308,7 +306,7 @@ On entering a closure we only count the enter to thunks ...
do { \
CostCentre cc = (CostCentre) (centre); \
ASSERT_IS_REGISTERED(cc,1); \
- if ( ! IS_CAF_OR_DICT_CC(cc) ) { \
+ if ( ! IS_CAF_OR_DICT_OR_SUB_CC(cc) ) { \
CCC = cc; \
} else { \
CCC_DETAIL_COUNT(cc->caffun_subsumed); \
diff --git a/ghc/includes/GranSim.lh b/ghc/includes/GranSim.lh
index e2da0d152a..ea92718c38 100644
--- a/ghc/includes/GranSim.lh
+++ b/ghc/includes/GranSim.lh
@@ -108,6 +108,18 @@ typedef struct event {
struct event *next;
} *eventq;
+#if (defined(GCap) || defined(GCgn))
+typedef struct clos /* a queue of ex-RBHs (needed for gen GC only) */
+{
+ struct clos *prev, *next;
+ P_ closure;
+} *closq;
+
+#define CLOS_CLOSURE(clos) (clos->closure)
+#define CLOS_PREV(clos) (clos->prev)
+#define CLOS_NEXT(clos) (clos->next)
+#endif
+
/* Macros for accessing components of the event structure */
#define EVENT_PROC(evt) (evt->proc)
#define EVENT_CREATOR(evt) (evt->creator)
@@ -122,10 +134,6 @@ typedef struct event {
/* Maximum number of PEs that can be simulated */
#define MAX_PROC (BITS_IN(W_))
-#if 0
-extern W_ IdleProcs, Idlers;
-#endif
-
/* Processor numbers to bitmasks and vice-versa */
#define MainProc 0 /* Id of main processor */
#define MAX_PRI 10000 /* max possible priority */
@@ -235,6 +243,8 @@ void newevent PROTO((PROC proc, PROC creator, TIME time, EVTTYPE
evttype, P_ tso, P_ node, sparkq spark));
void prepend_event PROTO((eventq event));
eventq grab_event PROTO((STG_NO_ARGS));
+void traverse_eventq_for_gc PROTO((STG_NO_ARGS));
+
void print_event PROTO((eventq event));
void print_eventq PROTO((eventq hd));
void print_spark PROTO((sparkq spark));
@@ -273,8 +283,8 @@ IF_RTS(void end_gr_simulation(STG_NO_ARGS);)
simulated packet buffer size.
*/
-#define GRANSIM_DEFAULT_PACK_BUFFER_SIZE 200
-#define REALLOC_SZ 50
+#define GRANSIM_DEFAULT_PACK_BUFFER_SIZE 400
+#define REALLOC_SZ 200
/* extern W_ gran_mpacktime, gran_mtidytime, gran_munpacktime; */
diff --git a/ghc/includes/LLC.h b/ghc/includes/LLC.h
index c097e4005a..d8550f40ca 100644
--- a/ghc/includes/LLC.h
+++ b/ghc/includes/LLC.h
@@ -84,7 +84,7 @@ void get_opcode_and_sender PROTO((PACKET p, OPCODE *popcode, GLOBAL_TASK_ID *pse
GLOBAL_TASK_ID *PEStartUp PROTO((unsigned nPEs));
void PEShutDown(STG_NO_ARGS);
-void (*ExceptionHandler)(STG_NO_ARGS);
+void (*ExceptionHandler) PROTO((PACKET));
#endif /*PAR */
#endif /*defined __LLC_H */
diff --git a/ghc/includes/Parallel.lh b/ghc/includes/Parallel.lh
index b92e4c292d..6a565dbead 100644
--- a/ghc/includes/Parallel.lh
+++ b/ghc/includes/Parallel.lh
@@ -547,13 +547,14 @@ P_ get_closure_info PROTO((P_ closure, W_ *size, W_ *ptrs, W_ *nonptrs, W_
# define PACK_HDR_SIZE 4 /* Words of header in a packet */
# define PACK_HEAP_REQUIRED \
- ((RTSflags.GranFlags.packBufferSize - PACK_HDR_SIZE) / (PACK_GA_SIZE + _FHS) * (SPEC_HS + 2))
+ ((RTSflags.GranFlags.packBufferSize - PACK_HDR_SIZE) / (PACK_GA_SIZE \
+ + _FHS) * (SPEC_HS + 2))
# define PACK_FLAG_LOCN 0
# define PACK_TSO_LOCN 1
# define PACK_UNPACKED_SIZE_LOCN 2
# define PACK_SIZE_LOCN 3
-# define MAGIC_PACK_FLAG 0xfabc
+# define MAGIC_PACK_FLAG 0xfabc
#endif
#endif /* Parallel_H */
diff --git a/ghc/includes/RtsTypes.lh b/ghc/includes/RtsTypes.lh
index 7e226520be..191e8b20c5 100644
--- a/ghc/includes/RtsTypes.lh
+++ b/ghc/includes/RtsTypes.lh
@@ -88,10 +88,10 @@ typedef struct cc {
char *group; /* name of group in which _scc_ occurs */
char is_subsumed; /* 'B' => *not* a CAF/dict/sub cc */
- /* 'S' => *is* a subsumed cc */
+ /* 's' => *is* a subsumed cc */
/* 'c' => *is* a CAF cc */
/* 'd' => *is* a dictionary cc */
- /* IS_CAF_OR_DICT tests for lowercase bit */
+ /* IS_CAF_OR_DICT_OR_SUB_CC tests for lowercase bit */
/* Statistics Gathered */
diff --git a/ghc/includes/Threads.lh b/ghc/includes/Threads.lh
index 4b9a72201e..f0f88a5474 100644
--- a/ghc/includes/Threads.lh
+++ b/ghc/includes/Threads.lh
@@ -133,9 +133,6 @@ extern I_ threadId;
void ScheduleThreads PROTO((P_ topClosure));
#if defined(GRAN)
-#define OLD_SPARKNAME_MASK 0xffff0000
-#define NEW_SPARKNAME_MASK 0x0000ffff
-
void ReSchedule PROTO((int what_next)) STG_NORETURN;
void add_to_spark_queue PROTO((sparkq));
int set_sparkname PROTO((P_, int));
diff --git a/ghc/includes/c-as-asm.lit b/ghc/includes/c-as-asm.lit
deleted file mode 100644
index 4925d79909..0000000000
--- a/ghc/includes/c-as-asm.lit
+++ /dev/null
@@ -1,509 +0,0 @@
-\begin{onlystandalone}
-\documentstyle[11pt,literate,a4wide]{article}
-\begin{document}
-\title{C as the assembly language for the Glasgow Haskell compiler}
-\author{Patrick Sansom, with help from his enemies}
-\date{June 1992}
-\maketitle
-\begin{rawlatex}
-\tableofcontents
-\end{rawlatex}
-\end{onlystandalone}
-
-\begin{onlypartofdoc}
-\section[C-as-assembler]{Using C for our assembler}
-\downsection
-\end{onlypartofdoc}
-
-% this file describes the issues;
-% it then \inputs all the actual source bits that do the job
-
-
-%************************************************************************
-%* *
-\section[C-as-asm-intro-portable]{C as assembler: introduction to ``portable C''}
-%* *
-%************************************************************************
-
-The Glasgow Haskell compiler expresses its output in C for the usual
-reasons: (1)~We hope it will be more readily portable to new machines;
-(2)~We'd rather not waste our time writing a register-allocator,
-instruction-selection mumbo-jumbo, etc.---in short, the dreary,
-tedious bits of a code-generator.
-
-The execution model that underlies Glasgow Haskell is the
-Spineless-Tagless G-machine (STG-machine). The constituent pieces of
-the STG-machine model that must be fashioned out of good old-fashioned
-C code are, roughly:
-\begin{description}
-\item[Dynamic heap:] The usual.
-
-\item[Stack space:] With two stacks (A and B) growing from each end of a space,
-towards each other. To pass (some) arguments, return addresses, ``update
-frames'', ...
-
-\item[Various ``STG registers'':] Stack pointers, heap pointers,
-return registers---all abstract machine-model entities...
-
-\item[Tables of static read-only info:] Mostly so-called ``info
-tables''...
-
-\item[Code:] Lots of code fragments that do nothing but tail-call each
-other.
-\end{description}
-
-OK, as mentioned, goal Numero Uno in using C as the compiler's target
-is to produce code that's relatively easy to port. To this end, we
-need very-plain-vanilla bits of C to do all of the above things.
-Roughly speaking, here's what comes out:
-
-\begin{description}
-\item[Dynamic heap:] @malloc(3)@ a chunk of memory and use that.
-
-\item[Stack space:] Ditto.
-
-\item[Various ``STG registers'':] These are just global variables (or
-words in a ``global register table''), each just a 32|64-bit word (i.e.,
-not a funny structure, no funny tag bits, etc.---just very ordinary
-global variables).
-
-\item[Tables of static read-only info:] Initialised arrays of 32|64-bit
-words; they tend to come out looking something like (behind a wall of
-CPP macros) [very straightforward]:
-\begin{verbatim}
-const StgWord foo_info[] = { (StgWord) foo_entry, 0, 0 }
-\end{verbatim}
-
-\item[Code:] This is the tricky part: it's hard to convince any C compiler to do
-tail-calls, and certainly not all C compilers all of the time.
-
-Fortunately, {\em all} code fragments in the STG world can be coerced
-into C ``functions'' of the form:
-\begin{itemize}
-\item
-{\em No} arguments (all the STG-world function arguments, etc., go via
-the STG-world stacks and registers, not the C-world stacks/registers);
-\item
-They {\em always} know the next C fragment to be called (it's all
-tail-recursive, after all).
-\end{itemize}
-
-So: we dictate that every one of these code fragments is an
-argument-less C function of its own, which {\em returns} the address
-of the next code fragment to be executed (i.e., its continuation). Here
-is an example of one such fragment (simplified from actual code):
-
-\begin{verbatim}
-stg_1848_entry (/* no args */) {
- STK_CHK(0,-5);
- PUSH_STD_UPD_FRAME(Node,StdUpdRetVecReg,0,0);
- *(SpB+5)=(StgWord)(vtbl_1930);
- Node=(StgPtr)(*(Node+SPEC_HS));
- SpB=SpB+5;
- return(*Node); /* *Node points at next code to enter */
-}
-\end{verbatim}
-
-Now all we need is a so-called {\em mini-interpreter} to dispatch
-these returned continuations; this is the basic idea:
-
-\begin{verbatim}
-StgFunPtr continuation = (StgFunPtr) start_cont;
-
-while ( 1 ) { continuation = (StgFunPtr) (continuation)(); }
-\end{verbatim}
-\end{description}
-
-If you are utterly baffled at this point, you probably need to read
-the STG-machine paper.
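
For concreteness, here is how those two halves might be wired together
in one self-contained C sketch.  The names (@miniInterpret@,
@stop_entry@, and so on) are invented for illustration; the real
fragments and dispatch loop live in the RTS, behind its macros.

\begin{verbatim}
typedef void *StgWord;
typedef StgWord (*StgFunPtr)(void); /* code fragment: no args, returns next fragment */

static StgWord stop_entry(void)  { return 0; }              /* sentinel: leave the loop */
static StgWord hello_entry(void) { return (StgWord) stop_entry; }

/* the mini-interpreter: keep dispatching continuations until one returns 0 */
static void miniInterpret(StgFunPtr start)
{
    StgFunPtr cont = start;
    while (cont != 0)
        cont = (StgFunPtr) (*cont)();
}

/* e.g.:  miniInterpret(hello_entry);  */
\end{verbatim}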
-
-All of the above is dead simple, if not particularly whizzy; in the
-rest of this document, when we talk about ``portable~C'' (or, ``the
-portable version''),
-\index{portable C output}
-we mean this stuff.
-
-%************************************************************************
-%* *
-\section[C-as-asm-intro-fast]{C as assembler: going faster, introduction to ``optimised C''}
-%* *
-%************************************************************************
-
-%************************************************************************
-%* *
-\subsection[C-as-asm-portably-slow]{Why ``portable C'' is slow}
-%* *
-%************************************************************************
-
-Compiling Haskell programs via simple, utterly portable C, as outlined
-above, comes at a significant cost in code size and speed.
-
-\begin{description}
-\item[Dynamic heap:] No problems associated with this.
-
-\item[Stack space:] Or this.
-
-[Well, not quite. The C compiler won't take advantage of the {\em
-fact} (which it doesn't know) that the heap and stacks {\em cannot
-possibly overlap}. In a perfect world, it could use this information
-to select better instruction sequences.]
-
-\item[Various ``STG registers'':] These are all in global C variables,
-about which C compilers are notoriously pessimistic.
-
-A load or store to one of these ``registers'' is almost certainly two
-or more instructions---not to mention stalls for going to memory. And
-their global-you-don't-know-anything-about-me nature (in the C
-compiler's view) is very optimisation-unfriendly.
-
-And, because they're the ``registers'' of the STG execution model,
-they are {\em FREQUENTLY} used! You {\em really pay} for their
-global-variableness... (estimates: 2--4x of code speed, 2x of code
-size)
-
-\item[Tables of static read-only info:] There aren't any big costs
-associated with these, except that you can't micro-manage their
-placement in memory, esp.~w.r.t.~entry-code fragments.
-(See \sectionref{C-as-asm-native}.)
-
-
-\item[Code:] Rather than really tail-jumping, we make many, many trips
-through the ``mini-interpreter.'' Besides all those instructions,
-there is probably plenty of pushing/popping of frames on/off the C
-stack, because each dispatch by the mini-interpreter is a C function
-call...
-
-Also, we don't ``fall through'' from argument-satisfaction-checking
-code into the real code for a function: we make an extra, fairly
-gratuitous trip through the mini-interpreter...
-
-We {\em estimate} that real tail-jumping should make programs go
-\pl{~25%} faster.
-\end{description}
-
-%************************************************************************
-%* *
-\subsection[C-as-asm-fast]{Solution: ``Optimising C,'' with our friend, Richard Stallman}
-%* *
-%************************************************************************
-
-The freely-available GNU~C compiler, GCC, (version 2.x), written under the
-lead of Richard Stallman at the Free Software Foundation, is a good
-C~compiler that has some non-standard extensions and machine-code
-hooks that make it possible to win back practically all the slow-nesses
-listed above for ``portable C''.
-
-First, there is a non-standard extension to C that makes it possible
-to put a ``global variable'' in a machine register. For example,
-something like
-
-\begin{verbatim}
-register StgPtr SpB __asm__("%i4");
-\end{verbatim}
-
-says ``global variable'' \tr{SpB} should live in machine-register \tr{%i4}.
-
-Second, GCC offers an ``extended \tr{__asm__}'' directive, which lets
-you inject raw assembly-language stuff into the C-compiler output.
-Inserting \tr{jmps} in this way is one component of the
-do-real-tailjumps machinery...
-
-The other main component of doing-real-tailjumps is shutting down the
-C world's function-calling machinery, i.e., pushing/popping of stack
-frames (and/or register windows, in the SPARC case). This involves
-``sedding'' the C-compiler's assembly-language output, stripping off
-function prologues and epilogues, and other such really horrible
-stuff...
-
-{\em Caveat~1:} The above is machine- and compiler-specific. {\em
-Response:} We don't care. GCC is freely and widely available and has
-been ported to bazillions of machines.
-
-{\em Caveat~2:} The above constitutes {\em serious} mangling of what
-ends up in a \tr{.o} object file. Mixing such \tr{.o} files with
-others that were incompatibly compiled (e.g., w/ different
-magical-global-register declarations) is {\em guaranteed} to result in
-gruesome program death.
-
-{\em Response:} We treat optimised-C \tr{.o} files quite separately.
-First, this document carefully records how C files from different
-``worlds'' must be compiled (next section); also, our
-compilation-system driver checks that executables it links are built
-from compatible \tr{.o} files (see \sectionref{driver-consistency-chking}).
-
-The User's Guide describes how the beleaguered Haskell programmer
-interacts with all this C optimisation stuff... (executive summary:
-it's invisible.)
-
-[For a discussion of how well you can do on compiling via straight
-ANSI-standard C, see the paper from CMU about compiling ML...]
-
-%************************************************************************
-%* *
-\subsection[C-as-asm-worlds]{``Worlds'' that must interoperate in optimised~C}
-%* *
-%************************************************************************
-
-A Glasgow-Haskell-compiled executable is made of bits that come from
-various places, or ``worlds.'' These are:
-\begin{itemize}
-\item
-The major ``world'' is the {\em Haskell Threaded World} of
-direct-jumping, machine-register-using compiled-to-optimised-C Haskell
-code.
-
-\item
-The Haskell Threaded World may call into the {\em Arbitrary~C
-World}; for example, to do I/O. This is C code that is written by
-someone else, compiled by someone else, over which the Glasgow Haskell
-system has no control whatsoever. However, a most pleasant property
-of the Arbitrary~C World is that it doesn't care about any of the
-internals of the Haskell threaded world. In particular, it does
-not know about the heap, stacks, etc., and hence does not modify
-the magic ``registers'' @Hp@, @SpA@, etc.
-
-\item
-There may also be calls out to an {\em STG~C World}, of which the
-storage manager is the major example. (The use of the GNU
-multi-precision arithmetic package to do ``Integers'' is nearly
-another; except we do all the STG magic in-line, then do ``ordinary''
-C calls to the library after we know it's safe to do so.) An STG~C
-World is a blob of C that needs to inspect/save/restore some part of
-the Haskell-Threaded-World state, notably some of its registers. For
-example, the storage manager {\em really} needs to know where the heap
-pointer \tr{Hp}---in a machine register---is pointing to, before
-embarking on a garbage collection... :-)
-
-These STG~C Worlds are the tricky devils...
-
-{\em NB:} The storage manager is a direct-jumping threaded world of
-its own, with its own mapping-to-machine-registers, etc.
-
-\item
-The {\em C~Startup World}: The C runtime-system gets things going by
-invoking a user-supplied function, \tr{main} (or \tr{mainIO}, if
-Glasgow I/O is in force). We must have such a function (it is
-provided by the GHC RTS), and it must get us from there into the
-Haskell Threaded World.
-
-Similarly, when we finally emerge from the Haskell Threaded World, we
-must exit as a well-behaved C program would, i.e., set the exit status,
-etc., etc.
-
-The C~Startup World is not a big problem.
-\end{itemize}
-
-%************************************************************************
-%* *
-\section[C-as-asm-issues]{Things to be {\em very careful} about with ``optimised C''}
-%* *
-%************************************************************************
-
-(Most of) These issues can be organised according to the affected
-World-to-World transition...
-
-%************************************************************************
-%* *
-\subsection[C-as-asm-starting-stopping]{Starting and stopping a Haskell Threaded World}
-%* *
-%************************************************************************
-
-\begin{itemize}
-\item
-As part of real-tailjumping support, we completely shut down the
-pushing/popping of C stack frames (by sedding off
-prologues/epilogues). This is fine---except the C compiler doesn't
-know we do this and may use the stack anyway (for temporaries,
-automatic variables, ...)! The initialisation of the Haskell Threaded
-World must therefore ensure that there is some/enough C-stack space
-sitting around for this purpose...
-
-\item
-The ``mini-interpreter'' must be re-entrant! The code for
-@error@---and bits of the RTS---actually exploit this.
-
-\item
-(C Startup to Haskell) Beginning of mini-interpreter: The STG
-registers (in real registers) must be initialised from the @smInfo@
-structure. Ending: @smInfo@ updated from the STG registers.
-\end{itemize}
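
A minimal sketch of that last point, building on the mini-interpreter
sketch earlier: the @smInfo@ field names below are invented for
illustration (the real structure belongs to the storage manager), and
the ``registers'' are shown in their portable-C guise, as ordinary
globals.

\begin{verbatim}
typedef void *StgPtr;

/* hypothetical smInfo layout -- field names invented for illustration */
struct SMinfo { StgPtr hp; StgPtr heapLimit; StgPtr spA; StgPtr spB; };
struct SMinfo smInfo;

/* the STG "registers" (portable-C flavour: plain globals) */
StgPtr Hp, HeapLimit, SpA, SpB;

void runThreadedWorld(StgFunPtr start)
{
    /* beginning: initialise the STG registers from smInfo */
    Hp  = smInfo.hp;   HeapLimit = smInfo.heapLimit;
    SpA = smInfo.spA;  SpB       = smInfo.spB;

    miniInterpret(start);        /* the dispatch loop sketched above */

    /* ending: update smInfo from the STG registers */
    smInfo.hp  = Hp;   smInfo.heapLimit = HeapLimit;
    smInfo.spA = SpA;  smInfo.spB       = SpB;
}
\end{verbatim}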
-
-%************************************************************************
-%* *
-\subsection[C-as-asm-haskell-to-arbitrary-C]{Haskell Threaded to/from Arbitrary~C}
-%* *
-%************************************************************************
-
-Remember, ``Arbitrary C'' cannot modify any of the STG registers, so
-all that is required is to keep them safe across the call.
-
-Hence we can just call the arbitrary~C routine. But, {\em don't} map
-any STG registers onto any call-clobbered registers; the arbitrary~C
-may stomp on them. (Just use callee-save registers.) In the SPARC
-case, this means all \tr{%o} and several \tr{%g} registers are
-unavailable. GCC~2.x warns if you try to snaffle call-clobbered
-registers.
-
-%************************************************************************
-%* *
-\subsection[C-as-asm-haskell-to-STG-C]{Haskell Threaded to/from special ``STG~C''}
-%* *
-%************************************************************************
-
-This is the tricky business.
-
-[ToDo: re-make this section in its particulars (it is out-of-date);
-principles still valid.]
-
-\begin{itemize}
-\item
-The compiler ``knows'' about things to which it makes ``STG calls''...
-
-It emits macro calls of the form \tr{STGCALLn(f, arg1, arg2..., arg_n)};
-calls to C-function @f@, with @n@ arguments, returning (at most) one
-32|64-bit value. A use of this might look like...
-
-\begin{verbatim}
-x = STGCALL2( arby_prec_add, y, z );
-\end{verbatim}
-
-\item
-In portable~C, the above would just become an ordinary C
-function-call:
-
-\begin{verbatim}
-x = arby_prec_add(y,z);
-\end{verbatim}
-
-Also, in portable~C, the @xxx_SAVE@ backup global (see below) is
-\tr{#defined} to be the same as \tr{xxx}!
-
-\item
-In optimised~C, the @STGCALLn@ macros turn into (highly-machine
-dependent) uses of the ultra-magic @callWrapper@ function.
-
-At least in the SPARC case, using @STGCALL1@ as an example:
-
-\begin{verbatim}
-STGCALL1( f, x ) ---> %o5 := f; callWrapper(x)
-\end{verbatim}
-
-The @callWrapper@ function is very special indeed. It must preserve
-all the callee-saves registers (SPARC e.g.: all \tr{%i} and \tr{%l}
-registers). It is {\em NOT} tail-jumped to like the
-Haskell-Threaded-World routines. So all it does is:
-
-\begin{enumerate}
-\item
-Save the return address (SPARC: \tr{[%o7]+8}) into @continuation_SAVE@.
-
-\item
-Save all relevant STG registers in their corresponding @_SAVE@ backup globals.
-(We might have @callWrapper@ variants to save different sets.)
-
-\item
-Call the function (SPARC: \tr{call %o5}); note that the function
-arguments are all in the right place already. (NOTE: It would be hard
-to think of something more machine- and compiler-assumptive! But,
-remember, we are calling code with which we are on friendly terms...)
-
-\item
-Restore the @_SAVE@ backup globals into the registers; the restore
-mustn't affect the single returned 32|64-bit value (SPARC case: in \tr{%o0}).
-
-\item
-@STGJUMP@ (tail-jump) to @continuation_SAVE@.
-\end{enumerate}
-
-N.B.: @callWrapper@ only works up to a limited number of arguments
-(SPARC: 5 words, \tr{%o0-%o4}), because we are using \tr{%o5} (SPARC)
-for the function to call. If we run into this limit, we should pass
-the function in a global instead of \tr{%o5} (or whatever).
-\end{itemize}
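
To summarise the two cases concretely: in portable~C the macros are
essentially trivial, and only the optimised-C expansions involve
@callWrapper@ and the save/restore dance above.  A sketch of the
portable definitions (the arities and register names here are chosen
for illustration, not the real set):

\begin{verbatim}
/* portable C: an "STG call" is just a C call ... */
#define STGCALL1(f,a)      ((f)(a))
#define STGCALL2(f,a,b)    ((f)(a,b))
#define STGCALL3(f,a,b,c)  ((f)(a,b,c))

/* ... and each _SAVE backup is #defined to be the register itself */
#define Hp_SAVE   Hp
#define SpA_SAVE  SpA
#define SpB_SAVE  SpB
\end{verbatim}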
-
-%************************************************************************
-%* *
-\subsubsection[C-as-asm-haskell-to-GC]{...To the Storage Manager in particular...}
-%* *
-%************************************************************************
-
-The interface to the GC uses the same regime; having to save and restore
-all STG and ptr regs is no big deal, because it only happens once per GC.
-Other routines should only use SpA/B, Hp, HeapLimit, SuA/B (for GC).
-
-%************************************************************************
-%* *
-\section[C-as-asm-native]{Things that could be better with a native-code generator}
-%* *
-%************************************************************************
-
-Even with all the fancy GNU~C tricks and whatnot, the resulting code
-speed and size isn't as good as you could do if you wrote a full-blown
-native-code generator. We have no interest in doing this---the payoff
-isn't big enough---but here we list, for the record, Things That Could
-Be Better:
-
-\begin{enumerate}
-\item
-We could park info-tables and entry code in judicious proximity to
-each other, so @ENTER_CLOSURE@ would be a
-single-indirection-with-offset, rather than a double-indirection.
-\end{enumerate}
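
To spell out that single item (the function bodies below are a guess
at the idea, not the real @ENTER_CLOSURE@ code, and the offset is
invented): with info tables and entry code placed independently,
entering a closure costs two dependent loads; with the entry code
parked at a known offset from its info table, it would cost one load
plus a jump at an offset.

\begin{verbatim}
typedef void *StgWord;
typedef StgWord (*StgFunPtr)(void);

/* as things stand: closure -> info table -> entry code (two loads) */
StgFunPtr enter_closure(StgWord *node)
{
    StgWord *info = (StgWord *) node[0];   /* load the info-table pointer */
    return (StgFunPtr) info[0];            /* load the entry-code pointer */
}

/* with entry code adjacent to its info table: one load, then an offset */
#define ENTRY_OFFSET 16                    /* invented for illustration */
StgFunPtr enter_closure_fast(StgWord *node)
{
    StgWord *info = (StgWord *) node[0];   /* the only load */
    return (StgFunPtr) ((char *) info + ENTRY_OFFSET);
}
\end{verbatim}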
-
-%************************************************************************
-%* *
-\section[C-as-asm-registers]{IMPLEMENTATION: Mapping STG registers onto machine registers}
-%* *
-%************************************************************************
-
-We have several mappings from STG~registers onto real machine registers,
-for different segments of the runtime system. Each mapping is
-dependent on the target architecture as well.
-
-\downsection
-\input{StgRegs.lh} % decls of STG registers
-\upsection
-
-%************************************************************************
-%* *
-\section[C-as-asm-tailjumps]{IMPLEMENTATION: tail-jumping}
-%* *
-%************************************************************************
-
-\downsection
-\input{COptJumps.lh} % per-platform tailjumps (optimised C)
-
-\subsection[driver-sedding]{ToDo: driver sedding}
-
-THIS SHOULD BE THE SOURCE FOR THE PART OF THE DRIVER THAT MANGLES
-OPTIMISED C ASSEMBLER FILES.
-
-\input{../runtime/c-as-asm/StgMiniInt.lc}
-\upsection
-
-%************************************************************************
-%* *
-\section[C-as-asm-wrappers]{IMPLEMENTATION: ``wrappers'' to call out from the threaded world}
-%* *
-%************************************************************************
-
-\downsection
-\input{COptWraps.lh}
-
-\input{../runtime/c-as-asm/HpOverflow.lc}
-\upsection
-
-%************************************************************************
-%* *
-\section[driver-consistency-chking]{ToDo: driver consistency checking}
-%* *
-%************************************************************************
-
-THIS SHOULD BE THE SOURCE FOR THE PART OF THE DRIVER THAT CHECKS
-CONSISTENCY OF EXECUTABLES.
-
-
-\begin{onlystandalone}
-\printindex
-\end{document}
-\end{onlystandalone}
diff --git a/ghc/includes/error.h b/ghc/includes/error.h
index 488a180a16..fe12ee6c84 100644
--- a/ghc/includes/error.h
+++ b/ghc/includes/error.h
@@ -2,6 +2,9 @@
extern int ghc_errno;
extern int ghc_errtype;
extern char *ghc_errstr;
+
+void cvtErrno (void);
+void stdErrno (void);
#endif
#define ERR_ALREADYEXISTS 1
diff --git a/ghc/includes/ieee-flpt.h b/ghc/includes/ieee-flpt.h
index cef274d375..2f74896407 100644
--- a/ghc/includes/ieee-flpt.h
+++ b/ghc/includes/ieee-flpt.h
@@ -5,31 +5,39 @@
/* Radix of exponent representation */
#ifndef FLT_RADIX
-#define FLT_RADIX 2
+# define FLT_RADIX 2
#endif
/* Number of base-FLT_RADIX digits in the significand of a float */
#ifndef FLT_MANT_DIG
-#define FLT_MANT_DIG 24
+# define FLT_MANT_DIG 24
#endif
/* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
#ifndef FLT_MIN_EXP
-#define FLT_MIN_EXP (-125)
+# ifdef __GLASGOW_HASKELL__
+# define FLT_MIN_EXP (negate 125)
+# else
+# define FLT_MIN_EXP (-125)
+# endif
#endif
/* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
#ifndef FLT_MAX_EXP
-#define FLT_MAX_EXP 128
+# define FLT_MAX_EXP 128
#endif
/* Number of base-FLT_RADIX digits in the significand of a double */
#ifndef DBL_MANT_DIG
-#define DBL_MANT_DIG 53
+# define DBL_MANT_DIG 53
#endif
/* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
#ifndef DBL_MIN_EXP
-#define DBL_MIN_EXP (-1021)
+# ifdef __GLASGOW_HASKELL__
+# define DBL_MIN_EXP (negate 1021)
+# else
+# define DBL_MIN_EXP (-1021)
+# endif
#endif
/* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
#ifndef DBL_MAX_EXP
-#define DBL_MAX_EXP 1024
+# define DBL_MAX_EXP 1024
#endif
diff --git a/ghc/includes/libposix.h b/ghc/includes/libposix.h
deleted file mode 100644
index 4ce0ceafc1..0000000000
--- a/ghc/includes/libposix.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef LIBPOSIX_H
-#ifdef HAVE_SYS_WAIT_H
-#include <sys/wait.h>
-#endif /* HAVE_SYS_WAIT_H */
-
-#ifdef HAVE_SIGNAL_H
-#include <signal.h>
-#endif /* HAVE_SIGNAL_H */
-
-#ifdef HAVE_SYS_UTSNAME_H
-#include <sys/utsname.h>
-#endif /* HAVE_SYS_UTSNAME_H */
-
-#ifdef HAVE_SYS_TIMES_H
-#include <sys/times.h>
-#endif /* HAVE_SYS_TIMES_H */
-
-#ifdef HAVE_DIRENT_H
-#include <dirent.h>
-#endif /* HAVE_DIRENT_H */
-
-#ifdef HAVE_SYS_STAT_H
-#include <sys/stat.h>
-#endif /* HAVE_SYS_STAT_H */
-
-#ifdef HAVE_FCNTL_H
-#include <fcntl.h>
-#endif /* HAVE_FCNTL_H */
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif /* HAVE_UNISTD_H */
-
-#ifdef HAVE_UTIME_H
-#include <utime.h>
-#endif /* HAVE_UTIME_H */
-
-#ifdef HAVE_TERMIOS_H
-#include <termios.h>
-#endif /* HAVE_TERMIOS_H */
-
-#ifdef HAVE_GRP_H
-#include <grp.h>
-#endif /* HAVE_GRP_H */
-
-#ifdef HAVE_PWD_H
-#include <pwd.h>
-#endif /* HAVE_PWD_H */
-
-#ifndef _POSIX_VDISABLE
-#define _POSIX_VDISABLE '\0' /* Just a guess...but it works for Suns */
-#endif
-
-extern I_ nocldstop;
-
-#define LIBPOSIX_H
-#endif
diff --git a/ghc/includes/platform.h.in b/ghc/includes/platform.h.in
index 226dc4c233..87cf5eaa33 100644
--- a/ghc/includes/platform.h.in
+++ b/ghc/includes/platform.h.in
@@ -1,23 +1,25 @@
#ifndef PLATFORM_H
#define PLATFORM_H
-#define alpha_dec_osf1 1
-#define hppa1_1_hp_hpux 2
-#define i386_next_nextstep3 3
-#define i386_unknown_freebsd 4
-#define i386_unknown_linux 5
-#define i386_unknown_netbsd 6
-#define i386_unknown_solaris2 7
-#define m68k_apple_macos7 8
-#define m68k_next_nextstep 9
-#define m68k_simonpj_grip 10
-#define m68k_sun_sunos4 11
-#define mips_dec_ultrix 12
-#define mips_sgi_irix 13
-#define rs6000_ibm_aix 14
-#define sparc_sun_solaris2 15
-#define sparc_sun_sunos4 16
-#define i386_unknown_linuxaout 17
+#define alpha_dec_osf1 10
+#define alpha_unknown_linux 11
+#define hppa1_1_hp_hpux 20
+#define i386_next_nextstep3 30
+#define i386_unknown_bsdi 31
+#define i386_unknown_freebsd 32
+#define i386_unknown_linux 33
+#define i386_unknown_linuxaout 34
+#define i386_unknown_netbsd 35
+#define i386_unknown_solaris2 36
+#define m68k_next_nextstep 50
+#define m68k_simonpj_grip 51
+#define m68k_sun_sunos4 52
+#define mips_dec_ultrix 60
+#define mips_sgi_irix 61
+#define powerpc_ibm_aix 70
+#define sparc_sun_solaris2 80
+#define sparc_sun_sunos4 81
+#define sparc_unknown_linux 82
#define HostPlatform_TYPE @HostPlatform_CPP@
#define TargetPlatform_TYPE @HostPlatform_CPP@
diff --git a/ghc/includes/root.lit b/ghc/includes/root.lit
deleted file mode 100644
index f7d0a9704b..0000000000
--- a/ghc/includes/root.lit
+++ /dev/null
@@ -1,89 +0,0 @@
-\begin{onlystandalone}
-\documentstyle[11pt,literate]{article}
-\begin{document}
-\title{Imported Files}
-\author{The GRASP Team (editor: Kevin Hammond), \\
-Department of Computing Science, \\
-University of Glasgow, \\
-Glasgow, G12 8QQ, UK. \\
-\\
-Email: glasgow-haskell-\{request,bugs\}\@dcs.glasgow.ac.uk}
-\date{November 1992}
-\maketitle
-\begin{rawlatex}
-\tableofcontents
-\clearpage
-\end{rawlatex}
-\end{onlystandalone}
-
-\begin{onlypartofdoc}
-\section[imported-files]{Imported files}
-\downsection
-\end{onlypartofdoc}
-
-%---------------------------------------------------------------
-
-%************************************************************************
-%* *
-\section[Definitions]{Standard Definitions}
-%* *
-%************************************************************************
-
-\downsection
-\input{StgTypes.lh}
-\input{GhcConstants.lh}
-\input{StgRegs.lh}
-\upsection
-
-%************************************************************************
-%* *
-\section[COpt-defns]{Definitions for Optimised C}
-%* *
-%************************************************************************
-
-\downsection
-\input{COptJumps.lh}
-\input{COptWraps.lh}
-\upsection
-
-%************************************************************************
-%* *
-%%%%%\section[SMinterface.lh]{Storage Manager Interface}
-%* *
-%************************************************************************
-
-\input{SMinterface.lh}
-
-%************************************************************************
-%* *
-\section[SM-defns]{Storage Manager Definitions}
-%* *
-%************************************************************************
-
-\downsection
-\input{SMcompact.lh}
-\input{SMcopying.lh}
-\input{SMmark.lh}
-\input{SMupdate.lh}
-\upsection
-
-%************************************************************************
-%* *
-%\section[Ticky-counts]{Definitions for ``ticky-ticky'' profiling}
-%* *
-%************************************************************************
-
-\downsection
-\input{Ticky.lh}
-\upsection
-
-%------------------------------------------------------------------------
-
-\begin{onlypartofdoc}
-\upsection
-\end{onlypartofdoc}
-
-\begin{onlystandalone}
-%\printindex
-\end{document}
-\end{onlystandalone}
diff --git a/ghc/includes/stgdefs.h b/ghc/includes/stgdefs.h
index f2f6e58428..73eba90625 100644
--- a/ghc/includes/stgdefs.h
+++ b/ghc/includes/stgdefs.h
@@ -203,7 +203,7 @@ StgFunPtr impossible_jump_after_switch(STG_NO_ARGS);
/* hooks: user might write some of their own */
void ErrorHdrHook PROTO((FILE *));
-void OutOfHeapHook PROTO((W_));
+void OutOfHeapHook PROTO((W_, W_));
void StackOverflowHook PROTO((I_));
#ifdef CONCURRENT
void NoRunnableThreadsHook (STG_NO_ARGS);
diff --git a/ghc/includes/timezone.h b/ghc/includes/timezone.h
deleted file mode 100644
index bedafdf350..0000000000
--- a/ghc/includes/timezone.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef TIMEZONE_H
-#define TIMEZONE_H
-
-#define _OSF_SOURCE
-
-#if TIME_WITH_SYS_TIME
-# include <sys/time.h>
-# include <time.h>
-#else
-# if HAVE_SYS_TIME_H
-# include <sys/time.h>
-# else
-# include <time.h>
-# endif
-#endif
-
-#if HAVE_TM_ZONE
-#define ZONE(x) (((struct tm *)x)->tm_zone)
-#define SETZONE(x,z) (((struct tm *)x)->tm_zone = z)
-#define GMTOFF(x) (((struct tm *)x)->tm_gmtoff)
-#else
-#if HAVE_TZNAME
-extern time_t timezone, altzone;
-extern char *tzname[2];
-#define ZONE(x) (((struct tm *)x)->tm_isdst ? tzname[1] : tzname[0])
-#define SETZONE(x,z)
-#define GMTOFF(x) (((struct tm *)x)->tm_isdst ? altzone : timezone)
-#endif
-#endif
-
-#endif
diff --git a/ghc/lib/Jmakefile b/ghc/lib/Jmakefile
index c25b3b4d57..da21653e2b 100644
--- a/ghc/lib/Jmakefile
+++ b/ghc/lib/Jmakefile
@@ -40,7 +40,7 @@ EtagsNeededHere(tags)
****************************************************************/
/* The driver will give warnings if -split-objs, but that's cool... */
-GHC_OPTS=-short -cpp \
+GHC_OPTS=-recomp -cpp \
-dcore-lint \
-irequired \
-fusing-ghc-internals \
@@ -67,6 +67,7 @@ HSTAGS_OPTS=-fglasgow-exts
BASIC_HS = \
prelude/Prelude.hs \
prelude/GHCbase.hs \
+prelude/GHCerr.hs \
prelude/GHCps.hs \
prelude/GHCio.hs \
prelude/GHCmain.hs \
@@ -92,6 +93,8 @@ concurrent/SampleVar.hs \
concurrent/Semaphore.hs \
concurrent/Concurrent.hs
+BASIC_HIs = $(BASIC_HS:.hs=.hi)
+
BASIC_OBJS_DIRS = $(BASIC_HS:.hs=)
/* easy way to make many many Make variables: */
@@ -103,46 +106,6 @@ WayThingVars(BASIC)
* *
*************************************************************************/
-#ifndef SpecialGhcLibraryTarget
-#define SpecialGhcLibraryTarget(lib,tag,objs_DEP,objs_DIR,find_pat) @@\
-AllTarget(CAT3(lib,tag,.a)) @@\
-CAT3(lib,tag,.a):: objs_DEP @@\
- $(RM) $@ @@\
- TMPDIR=$(TMPDIR); export TMPDIR; \
- find objs_DIR -name find_pat -print | xargs ar q $@ @@\
- $(RANLIB) CAT3(lib,tag,.a) @@\
-clean :: @@\
- $(RM) CAT3(lib,tag,.a)
-#endif /* SpecialGhcLibraryTarget */
-
-#ifndef SpecialGhcLibInstallTarget
-#if DoInstallGHCSystem == YES
-#define SpecialGhcLibInstallTarget(lib,tag) @@\
-install :: CAT3(lib,tag,.a) @@\
- $(INSTALL) $(INSTLIBFLAGS) CAT3(lib,tag,.a) \
- $(INSTLIBDIR_GHC)/CAT3(lib,tag,.a) @@\
- $(RANLIB) $(INSTLIBDIR_GHC)/CAT3(lib,tag,.a)
-#else /* ! DoInstallGhc... */
-#define SpecialGhcLibInstallTarget(lib,tag) /*nothing*/
-#endif /* ! DoInstallGhc... */
-#endif /* SpecialGhcLibInstallTarget */
-
-/* build/install all the diff libs for a particular build */
-
-#define BigHisTarget(targeti,basei) @@\
-targeti :: @@\
-InstallTarget(targeti) @@\
-InstallMultNonExecTargets(targeti,basei,$(INSTDATADIR_GHC)/imports)
-
-#define BigLibsTarget(tag,glob,base) @@\
-SpecialGhcLibraryTarget(libHS,tag,base,$(BASIC_OBJS_DIRS),glob) @@\
-SpecialGhcLibInstallTarget(libHS,tag)
-
-/* build *everything* -- monster macro from hell */
-#define BigBuildTarget(tag,glob,targeti,base,basei) \
-BigLibsTarget(tag,glob,base) @@\
-BigHisTarget(targeti,basei)
-
/****************************************************************
* *
* Creating and installing... *
@@ -152,50 +115,12 @@ BigHisTarget(targeti,basei)
/* make sure install's target dir is there */
#if DoInstallGHCSystem == YES
-MakeDirectories(install, $(INSTLIBDIR_GHC) \
- $(INSTDATADIR_GHC)/imports)
+MakeDirectories(install, $(INSTLIBDIR_GHC) $(INSTDATADIR_GHC)/imports)
-basic_his :: /* nothing */
-InstallTarget(basic_his)
-InstallMultNonExecTargets(basic_his, $(BASIC_HIs), $(INSTDATADIR_GHC)/imports)
+InstallDataTarget(MODULES,$(INSTDATADIR_GHC)/imports)
#endif /* installing */
-IfBuild_normal(BigBuildTarget(,'*.o',basic_his, $(BASIC_DEP_norm), $(BASIC_HIs)))
-IfBuild_p(BigBuildTarget(_p,'*.p_o',his_p, $(BASIC_DEP_p), $(BASIC_HIs_p)))
-IfBuild_t(BigBuildTarget(_t,'*.t_o',his_t, $(BASIC_DEP_t), $(BASIC_HIs_t)))
-IfBuild_u(BigBuildTarget(,'*.u_o',his_u, $(BASIC_DEP_u), $(BASIC_HIs_u)))
-IfBuild_mc(BigBuildTarget(_mc,'*.mc_o',his_mc, $(BASIC_DEP_mc), $(BASIC_HIs_mc)))
-IfBuild_mr(BigBuildTarget(_mr,'*.mr_o',his_mr, $(BASIC_DEP_mr), $(BASIC_HIs_mr)))
-IfBuild_mt(BigBuildTarget(_mr,'*.mt_o',his_mt, $(BASIC_DEP_mt), $(BASIC_HIs_mt)))
-IfBuild_mp(BigBuildTarget(_mp,'*.mp_o',his_mp, $(BASIC_DEP_mp), $(BASIC_HIs_mp)))
-IfBuild_mg(BigBuildTarget(_mg,'*.mg_o',his_mg, $(BASIC_DEP_mg), $(BASIC_HIs_mg)))
-
-/* these GC ones do not *really* need separate .hi files,
- but it really makes life easier to do it this way
-*/
-IfBuild_2s(BigBuildTarget(_2s,'*.2s_o',his_2s, $(BASIC_DEP_2s), $(BASIC_HIs_2s)))
-IfBuild_1s(BigBuildTarget(_1s,'*.1s_o',his_1s, $(BASIC_DEP_1s), $(BASIC_HIs_1s)))
-IfBuild_du(BigBuildTarget(_du,'*.du_o',his_du, $(BASIC_DEP_du), $(BASIC_HIs_du)))
-
-/* user ways -- yeeps! */
-
-IfBuild_a(BigBuildTarget(_a,'*.a_o',his_a, $(BASIC_DEP_a), $(BASIC_HIs_a)))
-IfBuild_b(BigBuildTarget(_b,'*.b_o',his_b, $(BASIC_DEP_b), $(BASIC_HIs_b)))
-IfBuild_c(BigBuildTarget(_c,'*.c_o',his_c, $(BASIC_DEP_c), $(BASIC_HIs_c)))
-IfBuild_d(BigBuildTarget(_d,'*.d_o',his_d, $(BASIC_DEP_d), $(BASIC_HIs_d)))
-IfBuild_e(BigBuildTarget(_e,'*.e_o',his_e, $(BASIC_DEP_e), $(BASIC_HIs_e)))
-IfBuild_f(BigBuildTarget(_f,'*.f_o',his_f, $(BASIC_DEP_f), $(BASIC_HIs_f)))
-IfBuild_g(BigBuildTarget(_g,'*.g_o',his_g, $(BASIC_DEP_g), $(BASIC_HIs_g)))
-IfBuild_h(BigBuildTarget(_h,'*.h_o',his_h, $(BASIC_DEP_h), $(BASIC_HIs_h)))
-IfBuild_i(BigBuildTarget(_i,'*.i_o',his_i, $(BASIC_DEP_i), $(BASIC_HIs_i)))
-IfBuild_j(BigBuildTarget(_j,'*.j_o',his_j, $(BASIC_DEP_j), $(BASIC_HIs_j)))
-IfBuild_k(BigBuildTarget(_k,'*.k_o',his_k, $(BASIC_DEP_k), $(BASIC_HIs_k)))
-IfBuild_l(BigBuildTarget(_l,'*.l_o',his_l, $(BASIC_DEP_l), $(BASIC_HIs_l)))
-IfBuild_m(BigBuildTarget(_m,'*.m_o',his_m, $(BASIC_DEP_m), $(BASIC_HIs_m)))
-IfBuild_n(BigBuildTarget(_n,'*.n_o',his_n, $(BASIC_DEP_n), $(BASIC_HIs_n)))
-IfBuild_o(BigBuildTarget(_o,'*.o_o',his_o, $(BASIC_DEP_o), $(BASIC_HIs_o)))
-IfBuild_A(BigBuildTarget(_A,'*.A_o',his_A, $(BASIC_DEP_A), $(BASIC_HIs_A)))
-IfBuild_B(BigBuildTarget(_B,'*.B_o',his_B, $(BASIC_DEP_B), $(BASIC_HIs_B)))
+BasicEverything(libHS, $(INSTLIBDIR_GHC), $(INSTDATADIR_GHC))
/****************************************************************
* *
@@ -220,29 +145,33 @@ clean :: @@\
/* now use the macro: */
-CompileWayishly(GHC,prelude/Prelude,hs,-iprelude -fglasgow-exts -fcompiling-ghc-internals Prelude -fno-implicit-prelude)
-CompileWayishly(GHC,prelude/GHCbase,hs,-iprelude -fglasgow-exts -fcompiling-ghc-internals GHCbase)
-CompileWayishly(GHC,prelude/GHCps,hs, -iprelude -fglasgow-exts)
-CompileWayishly(GHC,prelude/GHCio,hs, -iprelude -fglasgow-exts)
+/* NB: the -Onots are only because -O would not go through on
+ a reasonably-sized machine (i.e., one I have)
+*/
+CompileWayishly(GHC,prelude/Prelude,hs,/*-split-objs Prelude*/ -iprelude -fglasgow-exts -fcompiling-ghc-internals Prelude -fno-implicit-prelude '-#include"cbits/stgio.h"' -H18m -Onot)
+CompileWayishly(GHC,prelude/GHCbase,hs,/*-split-objs GHCbase*/ -iprelude -fglasgow-exts -fcompiling-ghc-internals GHCbase '-#include"cbits/stgio.h"' -H12m -monly-2-regs -Onot)
+CompileWayishly(GHC,prelude/GHCerr,hs, /*-split-objs GHCerr*/ -iprelude -fglasgow-exts -fcompiling-ghc-internals GHCerr -H12m -Onot)
+CompileWayishly(GHC,prelude/GHCps,hs, /*-split-objs GHCps*/ -iprelude -fglasgow-exts '-#include"cbits/stgio.h"' -monly-3-regs -Onot)
+CompileWayishly(GHC,prelude/GHCio,hs, /*-split-objs GHCio*/ -iprelude -fglasgow-exts '-#include"cbits/stgio.h"' -Onot)
CompileWayishly(GHC,prelude/GHCmain,hs,-iprelude -fglasgow-exts)
CompileWayishly(GHC,prelude/PreludeGlaST,hs,-iprelude -fglasgow-exts)
-CompileWayishly(GHC,required/Array,hs,-fglasgow-exts -iprelude)
-CompileWayishly(GHC,required/Char,hs,)
-CompileWayishly(GHC,required/Complex,hs,)
-CompileWayishly(GHC,required/Directory,hs,-fglasgow-exts)
-CompileWayishly(GHC,required/IO,hs,-fglasgow-exts)
-CompileWayishly(GHC,required/Ix,hs,-fglasgow-exts)
-CompileWayishly(GHC,required/List,hs,)
-CompileWayishly(GHC,required/Maybe,hs,)
-CompileWayishly(GHC,required/Monad,hs,)
-CompileWayishly(GHC,required/Ratio,hs,)
-CompileWayishly(GHC,required/System,hs,-fglasgow-exts)
+CompileWayishly(GHC,required/Array,hs, /*-split-objs Array*/ -fglasgow-exts -iprelude -Onot)
+CompileWayishly(GHC,required/Char,hs, /*-split-objs Char*/)
+CompileWayishly(GHC,required/Complex,hs,/*-split-objs Complex*/)
+CompileWayishly(GHC,required/Directory,hs,/*-split-objs Directory*/ -fglasgow-exts '-#include"cbits/stgio.h"' -monly-3-regs)
+CompileWayishly(GHC,required/IO,hs,/*-split-objs IO*/ -fglasgow-exts '-#include"cbits/stgio.h"')
+CompileWayishly(GHC,required/Ix,hs, /*-split-objs Ix*/ -fglasgow-exts)
+CompileWayishly(GHC,required/List,hs, /*-split-objs List*/)
+CompileWayishly(GHC,required/Maybe,hs, /*-split-objs Maybe*/)
+CompileWayishly(GHC,required/Monad,hs, /*-split-objs Monad*/)
+CompileWayishly(GHC,required/Ratio,hs, /*-split-objs Ratio*/)
+CompileWayishly(GHC,required/System,hs,/*-split-objs System*/ -fglasgow-exts '-#include"cbits/stgio.h"')
CompileWayishly(GHC,concurrent/Channel,hs,)
CompileWayishly(GHC,concurrent/ChannelVar,hs,)
CompileWayishly(GHC,concurrent/Merge,hs,-iconcurrent)
-CompileWayishly(GHC,concurrent/Parallel,hs,)
+CompileWayishly(GHC,concurrent/Parallel,hs,-fglasgow-exts)
CompileWayishly(GHC,concurrent/SampleVar,hs,)
CompileWayishly(GHC,concurrent/Semaphore,hs,)
CompileWayishly(GHC,concurrent/Concurrent,hs,-iconcurrent)
diff --git a/ghc/lib/MODULES b/ghc/lib/MODULES
new file mode 100644
index 0000000000..fab3e1dbc2
--- /dev/null
+++ b/ghc/lib/MODULES
@@ -0,0 +1,26 @@
+# Modules that the user is allowed to mention.
+# 'mkdependHS' consults this list.
+Array
+Channel
+ChannelVar
+Char
+Complex
+Concurrent
+Directory
+GHCbase
+GHCio
+GHCmain
+GHCps
+IO
+Ix
+List
+Maybe
+Merge
+Monad
+Parallel
+Prelude
+PreludeGlaST
+Ratio
+SampleVar
+Semaphore
+System
diff --git a/ghc/lib/cbits/Jmakefile b/ghc/lib/cbits/Jmakefile
index e69de29bb2..2fc374cc14 100644
--- a/ghc/lib/cbits/Jmakefile
+++ b/ghc/lib/cbits/Jmakefile
@@ -0,0 +1,68 @@
+/* Literate-pgmming suffix rules used herein */
+UnlitSuffixRule(.lc,.c)
+
+CBITS_LC = \
+ closeFile.lc \
+ createDirectory.lc \
+ errno.lc \
+ fileEOF.lc \
+ fileGetc.lc \
+ fileLookAhead.lc \
+ filePosn.lc \
+ filePutc.lc \
+ fileSize.lc \
+ flushFile.lc \
+ getBufferMode.lc \
+ getCurrentDirectory.lc \
+ getDirectoryContents.lc \
+ getLock.lc \
+ inputReady.lc \
+ openFile.lc \
+ readFile.lc \
+ removeDirectory.lc \
+ removeFile.lc \
+ renameDirectory.lc \
+ renameFile.lc \
+ seekFile.lc \
+ setBuffering.lc \
+ setCurrentDirectory.lc \
+ system.lc \
+ writeFile.lc
+
+CBITS_OBJS = $(CBITS_LC:.lc=.o)
+C_FILES = $(CBITS_LC:.lc=.c)
+
+ExtraStuffToClean ( $(C_FILES) )
+
+NormalLibraryTarget(HS_cbits,$(CBITS_OBJS))
+ExtraStuffToClean($(CBITS_OBJS))
+#if DoInstallGHCSystem == YES
+InstallLibraryTarget(HS_cbits,$(INSTLIBDIR_GHC))
+#endif
+
+CompileCBitsly(GHC,closeFile,)
+CompileCBitsly(GHC,createDirectory,)
+CompileCBitsly(GHC,errno,)
+CompileCBitsly(GHC,fileEOF,)
+CompileCBitsly(GHC,fileGetc,)
+CompileCBitsly(GHC,fileLookAhead,)
+CompileCBitsly(GHC,filePosn,)
+CompileCBitsly(GHC,filePutc,)
+CompileCBitsly(GHC,fileSize,)
+CompileCBitsly(GHC,flushFile,)
+CompileCBitsly(GHC,getBufferMode,)
+CompileCBitsly(GHC,getCurrentDirectory,)
+CompileCBitsly(GHC,getDirectoryContents,)
+CompileCBitsly(GHC,getLock,)
+CompileCBitsly(GHC,inputReady,)
+CompileCBitsly(GHC,openFile,)
+CompileCBitsly(GHC,readFile,)
+CompileCBitsly(GHC,removeDirectory,)
+CompileCBitsly(GHC,removeFile,)
+CompileCBitsly(GHC,renameDirectory,)
+CompileCBitsly(GHC,renameFile,)
+CompileCBitsly(GHC,seekFile,)
+CompileCBitsly(GHC,setBuffering,)
+CompileCBitsly(GHC,setCurrentDirectory,)
+CompileCBitsly(GHC,system,)
+CompileCBitsly(GHC,writeFile,)
diff --git a/ghc/lib/cbits/getLock.lc b/ghc/lib/cbits/getLock.lc
new file mode 100644
index 0000000000..f39014e25e
--- /dev/null
+++ b/ghc/lib/cbits/getLock.lc
@@ -0,0 +1,138 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1994
+%
+\subsection[getLock.lc]{getLock Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#ifndef FD_SETSIZE
+#define FD_SETSIZE 256
+#endif
+
+typedef struct {
+ dev_t device;
+ ino_t inode;
+ int fd;
+} Lock;
+
+static Lock readLock[FD_SETSIZE];
+static Lock writeLock[FD_SETSIZE];
+
+static int readLocks = 0;
+static int writeLocks = 0;
+
+int
+lockFile(fd, exclusive)
+int fd;
+int exclusive;
+{
+ int i;
+ struct stat sb;
+
+ while (fstat(fd, &sb) < 0) {
+ if (errno != EINTR) {
+ return -1;
+ }
+ }
+
+ /* Only lock regular files */
+ if (!S_ISREG(sb.st_mode))
+ return 0;
+
+ for (i = 0; i < writeLocks; i++)
+ if (writeLock[i].inode == sb.st_ino && writeLock[i].device == sb.st_dev) {
+ errno = EAGAIN;
+ return -1;
+ }
+
+ if (!exclusive) {
+ i = readLocks++;
+ readLock[i].device = sb.st_dev;
+ readLock[i].inode = sb.st_ino;
+ readLock[i].fd = fd;
+ return 0;
+ }
+
+ for (i = 0; i < readLocks; i++)
+ if (readLock[i].inode == sb.st_ino && readLock[i].device == sb.st_dev) {
+ errno = EAGAIN;
+ return -1;
+ }
+
+ i = writeLocks++;
+ writeLock[i].device = sb.st_dev;
+ writeLock[i].inode = sb.st_ino;
+ writeLock[i].fd = fd;
+ return 0;
+}
+
+void
+unlockFile(fd)
+int fd;
+{
+ int i;
+
+ for (i = 0; i < readLocks; i++)
+ if (readLock[i].fd == fd) {
+ while (++i < readLocks)
+ readLock[i - 1] = readLock[i];
+ readLocks--;
+ return;
+ }
+
+ for (i = 0; i < writeLocks; i++)
+ if (writeLock[i].fd == fd) {
+ while (++i < writeLocks)
+ writeLock[i - 1] = writeLock[i];
+ writeLocks--;
+ return;
+ }
+}
+
+StgInt
+getLock(fp, exclusive)
+StgAddr fp;
+StgInt exclusive;
+{
+ if (lockFile(fileno((FILE *) fp), exclusive) < 0) {
+ if (errno == EBADF)
+ return 0;
+ else {
+ cvtErrno();
+ switch (ghc_errno) {
+ default:
+ stdErrno();
+ break;
+ case GHC_EACCES:
+ case GHC_EAGAIN:
+ ghc_errtype = ERR_RESOURCEBUSY;
+ ghc_errstr = "file is locked";
+ break;
+ }
+ (void) fclose((FILE *) fp);
+ return -1;
+ }
+ }
+ return 1;
+}
+
+\end{code}
diff --git a/ghc/lib/cbits/inputReady.lc b/ghc/lib/cbits/inputReady.lc
new file mode 100644
index 0000000000..fc8184e994
--- /dev/null
+++ b/ghc/lib/cbits/inputReady.lc
@@ -0,0 +1,87 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1994
+%
+\subsection[inputReady.lc]{hReady Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+StgInt
+inputReady(fp)
+StgAddr fp;
+{
+ int flags;
+ int c;
+
+ if (feof((FILE *) fp))
+ return 0;
+
+ /* Get the original file status flags */
+ while ((flags = fcntl(fileno((FILE *) fp), F_GETFL)) < 0) {
+ /* highly unlikely */
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+
+ /* If it's not already non-blocking, make it so */
+ if (!(flags & O_NONBLOCK)) {
+ while (fcntl(fileno((FILE *) fp), F_SETFL, flags | O_NONBLOCK) < 0) {
+ /* still highly unlikely */
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ }
+ /* Now try to get a character */
+ while ((c = getc((FILE *) fp)) == EOF && errno == EINTR)
+ clearerr((FILE *) fp);
+
+ /* If we made it non-blocking for this, switch it back */
+ if (!(flags & O_NONBLOCK)) {
+ while (fcntl(fileno((FILE *) fp), F_SETFL, flags) < 0) {
+ /* still highly unlikely */
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ }
+
+ if (c == EOF) {
+ if (errno == EAGAIN || feof((FILE *) fp)) {
+ clearerr((FILE *) fp);
+ return 0;
+ } else {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ } else if (ungetc(c, (FILE *) fp) == EOF) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ } else
+ return 1;
+}
+
+\end{code}
diff --git a/ghc/lib/cbits/openFile.lc b/ghc/lib/cbits/openFile.lc
new file mode 100644
index 0000000000..73ebe2462a
--- /dev/null
+++ b/ghc/lib/cbits/openFile.lc
@@ -0,0 +1,209 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1994
+%
+\subsection[openFile.lc]{openFile Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+StgAddr
+openFile(file, how)
+StgByteArray file;
+StgByteArray how;
+{
+ FILE *fp;
+ int fd;
+ int oflags;
+ int exclusive;
+ int created = 0;
+ struct stat sb;
+
+ /*
+ * Since we aren't supposed to succeed when we're opening for writing and
+ * there's another writer, we can't just do an fopen() for "w" mode.
+ */
+
+ switch (how[0]) {
+ case 'a':
+ oflags = O_WRONLY | O_NOCTTY | O_APPEND;
+ exclusive = 1;
+ break;
+ case 'w':
+ oflags = O_WRONLY | O_NOCTTY;
+ exclusive = 1;
+ break;
+ case 'r':
+ oflags = how[1] == '+' ? O_RDWR | O_NOCTTY : O_RDONLY | O_NOCTTY;
+ exclusive = 0;
+ break;
+ default:
+ fprintf(stderr, "openFile: unknown mode `%s'\n", how);
+ EXIT(EXIT_FAILURE);
+ }
+
+ /* First try to open without creating */
+ while ((fd = open(file, oflags, 0666)) < 0) {
+ if (errno == ENOENT) {
+ if (how[0] == 'r' && how[1] == '\0') {
+ /* For ReadMode, just bail out now */
+ ghc_errtype = ERR_NOSUCHTHING;
+ ghc_errstr = "file does not exist";
+ return NULL;
+ }
+ /* Now try to create it */
+ while ((fd = open(file, oflags | O_CREAT | O_EXCL, 0666)) < 0) {
+ if (errno == EEXIST) {
+ /* Race detected; go back and open without creating it */
+ break;
+ } else if (errno != EINTR) {
+ cvtErrno();
+ switch (ghc_errno) {
+ default:
+ stdErrno();
+ break;
+ case GHC_ENOENT:
+ case GHC_ENOTDIR:
+ ghc_errtype = ERR_NOSUCHTHING;
+ ghc_errstr = "no path to file";
+ break;
+ case GHC_EINVAL:
+ ghc_errtype = ERR_PERMISSIONDENIED;
+ ghc_errstr = "unsupported owner or group";
+ break;
+ }
+ return NULL;
+ }
+ }
+ if (fd >= 0) {
+ created = 1;
+ break;
+ }
+ } else if (errno != EINTR) {
+ cvtErrno();
+ switch (ghc_errno) {
+ default:
+ stdErrno();
+ break;
+ case GHC_ENOTDIR:
+ ghc_errtype = ERR_NOSUCHTHING;
+ ghc_errstr = "no path to file";
+ break;
+ case GHC_EINVAL:
+ ghc_errtype = ERR_PERMISSIONDENIED;
+ ghc_errstr = "unsupported owner or group";
+ break;
+ }
+ return NULL;
+ }
+ }
+
+ /* Make sure that we aren't looking at a directory */
+
+ while (fstat(fd, &sb) < 0) {
+ /* highly unlikely */
+ if (errno != EINTR) {
+ cvtErrno();
+ if (created)
+ (void) unlink(file);
+ (void) close(fd);
+ return NULL;
+ }
+ }
+ if (S_ISDIR(sb.st_mode)) {
+ ghc_errtype = ERR_INAPPROPRIATETYPE;
+ ghc_errstr = "file is a directory";
+ /* We can't have created it in this case. */
+ (void) close(fd);
+
+ return NULL;
+ }
+ /* Use our own personal locking */
+
+ if (lockFile(fd, exclusive) < 0) {
+ cvtErrno();
+ switch (ghc_errno) {
+ default:
+ stdErrno();
+ break;
+ case GHC_EACCES:
+ case GHC_EAGAIN:
+ ghc_errtype = ERR_RESOURCEBUSY;
+ ghc_errstr = "file is locked";
+ break;
+ }
+ if (created)
+ (void) unlink(file);
+ (void) close(fd);
+ return NULL;
+ }
+
+ /*
+ * Write mode is supposed to truncate the file. Unfortunately, our pal
+ * ftruncate() is non-POSIX, so we truncate with a second open, which may fail.
+ */
+
+ if (how[0] == 'w') {
+ int fd2;
+
+ oflags |= O_TRUNC;
+ while ((fd2 = open(file, oflags, 0666)) < 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ if (created)
+ (void) unlink(file);
+ (void) close(fd);
+ switch (ghc_errno) {
+ default:
+ stdErrno();
+ break;
+ case GHC_EAGAIN:
+ ghc_errtype = ERR_RESOURCEBUSY;
+ ghc_errstr = "enforced lock prevents truncation";
+ break;
+ case GHC_ENOTDIR:
+ ghc_errtype = ERR_NOSUCHTHING;
+ ghc_errstr = "no path to file";
+ break;
+ case GHC_EINVAL:
+ ghc_errtype = ERR_PERMISSIONDENIED;
+ ghc_errstr = "unsupported owner or group";
+ break;
+ }
+ return NULL;
+ }
+ }
+ close(fd2);
+ }
+ errno = 0; /* Just in case fdopen() is lame */
+ while ((fp = fdopen(fd, how)) == NULL) {
+ if (errno != EINTR) {
+ cvtErrno();
+ if (created)
+ (void) unlink(file);
+ (void) close(fd);
+ return NULL;
+ }
+ }
+
+ return (StgAddr) fp;
+}
+
+\end{code}
diff --git a/ghc/lib/cbits/readFile.lc b/ghc/lib/cbits/readFile.lc
new file mode 100644
index 0000000000..2b649e3dbd
--- /dev/null
+++ b/ghc/lib/cbits/readFile.lc
@@ -0,0 +1,102 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1994
+%
+\subsection[readFile.lc]{hGetContents Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#define EOT 4
+
+StgInt
+readBlock(buf, fp, size)
+StgAddr buf;
+StgAddr fp;
+StgInt size;
+{
+ int count;
+
+ if (feof((FILE *) fp)) {
+ ghc_errtype = ERR_EOF;
+ ghc_errstr = "";
+ return -1;
+ }
+
+ while ((count = fread(buf, 1, size, (FILE *) fp)) == 0) {
+ if (feof((FILE *) fp)) {
+ ghc_errtype = ERR_EOF;
+ ghc_errstr = "";
+ return -1;
+ } else if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ clearerr((FILE *) fp);
+ }
+
+ return count;
+}
+
+StgInt
+readLine(buf, fp, size)
+StgAddr buf;
+StgAddr fp;
+StgInt size;
+{
+ if (feof((FILE *) fp)) {
+ ghc_errtype = ERR_EOF;
+ ghc_errstr = "";
+ return -1;
+ }
+
+ while (fgets(buf, size, (FILE *) fp) == NULL) {
+ if (feof((FILE *) fp)) {
+ ghc_errtype = ERR_EOF;
+ ghc_errstr = "";
+ return -1;
+ } else if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ clearerr((FILE *) fp);
+ }
+
+ return strlen(buf);
+}
+
+StgInt
+readChar(fp)
+StgAddr fp;
+{
+ int c;
+
+ if (feof((FILE *) fp)) {
+ ghc_errtype = ERR_EOF;
+ ghc_errstr = "";
+ return -1;
+ }
+
+ while ((c = getc((FILE *) fp)) == EOF) {
+ if (feof((FILE *) fp)) {
+ ghc_errtype = ERR_EOF;
+ ghc_errstr = "";
+ return -1;
+ } else if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ clearerr((FILE *) fp);
+ }
+
+ if (isatty(fileno((FILE *) fp)) && c == EOT)
+ return EOF;
+ else
+ return c;
+}
+
+\end{code}
diff --git a/ghc/lib/cbits/removeDirectory.lc b/ghc/lib/cbits/removeDirectory.lc
new file mode 100644
index 0000000000..3347fd7c09
--- /dev/null
+++ b/ghc/lib/cbits/removeDirectory.lc
@@ -0,0 +1,57 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1995
+%
+\subsection[removeDirectory.lc]{removeDirectory Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+StgInt
+removeDirectory(path)
+StgByteArray path;
+{
+ struct stat sb;
+
+ /* Check for an actual directory */
+ while (stat(path, &sb) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ if (!S_ISDIR(sb.st_mode)) {
+ ghc_errtype = ERR_INAPPROPRIATETYPE;
+ ghc_errstr = "not a directory";
+ return -1;
+ }
+ while (rmdir(path) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ switch (ghc_errno) {
+ default:
+ stdErrno();
+ break;
+ case GHC_ENOTEMPTY:
+ case GHC_EEXIST:
+ ghc_errtype = ERR_UNSATISFIEDCONSTRAINTS;
+ ghc_errstr = "directory not empty";
+ break;
+ }
+ return -1;
+ }
+ }
+ return 0;
+}
+
+\end{code}
diff --git a/ghc/lib/cbits/removeFile.lc b/ghc/lib/cbits/removeFile.lc
new file mode 100644
index 0000000000..095b6215b5
--- /dev/null
+++ b/ghc/lib/cbits/removeFile.lc
@@ -0,0 +1,48 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1995
+%
+\subsection[removeFile.lc]{removeFile Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+StgInt
+removeFile(path)
+StgByteArray path;
+{
+ struct stat sb;
+
+ /* Check for a non-directory */
+ while (stat(path, &sb) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ if (S_ISDIR(sb.st_mode)) {
+ ghc_errtype = ERR_INAPPROPRIATETYPE;
+ ghc_errstr = "file is a directory";
+ return -1;
+ }
+ while (unlink(path) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ return 0;
+}
+
+\end{code}
diff --git a/ghc/lib/cbits/renameDirectory.lc b/ghc/lib/cbits/renameDirectory.lc
new file mode 100644
index 0000000000..2a41186bfe
--- /dev/null
+++ b/ghc/lib/cbits/renameDirectory.lc
@@ -0,0 +1,48 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1995
+%
+\subsection[renameDirectory.lc]{renameDirectory Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+StgInt
+renameDirectory(opath, npath)
+StgByteArray opath;
+StgByteArray npath;
+{
+ struct stat sb;
+
+ /* Check for an actual directory */
+ while (stat(opath, &sb) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ if (!S_ISDIR(sb.st_mode)) {
+ ghc_errtype = ERR_INAPPROPRIATETYPE;
+ ghc_errstr = "not a directory";
+ return -1;
+ }
+ while(rename(opath, npath) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ return 0;
+}
+\end{code}
diff --git a/ghc/lib/cbits/renameFile.lc b/ghc/lib/cbits/renameFile.lc
new file mode 100644
index 0000000000..2bcb9c0e04
--- /dev/null
+++ b/ghc/lib/cbits/renameFile.lc
@@ -0,0 +1,132 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1995
+%
+\subsection[renameFile.lc]{renameFile Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+StgInt
+renameFile(opath, npath)
+StgByteArray opath;
+StgByteArray npath;
+{
+ struct stat sb;
+ int fd;
+ int created = 0;
+
+ /* Check for a non-directory source */
+ while (stat(opath, &sb) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ if (S_ISDIR(sb.st_mode)) {
+ ghc_errtype = ERR_INAPPROPRIATETYPE;
+ ghc_errstr = "file is a directory";
+ return -1;
+ }
+
+ /* Ensure a non-directory destination */
+
+ /* First try to open without creating */
+ while ((fd = open(npath, O_RDONLY | O_NOCTTY, 0)) < 0) {
+ if (errno == ENOENT) {
+ /* Now try to create it */
+ while ((fd = open(npath, O_RDONLY | O_NOCTTY | O_CREAT | O_EXCL, 0)) < 0) {
+ if (errno == EEXIST) {
+ /* Race detected; go back and open without creating it */
+ break;
+ } else if (errno != EINTR) {
+ cvtErrno();
+ switch (ghc_errno) {
+ default:
+ stdErrno();
+ break;
+ case GHC_ENOENT:
+ case GHC_ENOTDIR:
+ ghc_errtype = ERR_NOSUCHTHING;
+ ghc_errstr = "no path to file";
+ break;
+ case GHC_EINVAL:
+ ghc_errtype = ERR_PERMISSIONDENIED;
+ ghc_errstr = "unsupported owner or group";
+ break;
+ }
+ return -1;
+ }
+ }
+ if (fd >= 0) {
+ created = 1;
+ break;
+ }
+ } else if (errno != EINTR) {
+ cvtErrno();
+ switch (ghc_errno) {
+ default:
+ stdErrno();
+ break;
+ case GHC_ENOTDIR:
+ ghc_errtype = ERR_NOSUCHTHING;
+ ghc_errstr = "no path to file";
+ break;
+ case GHC_EINVAL:
+ ghc_errtype = ERR_PERMISSIONDENIED;
+ ghc_errstr = "unsupported owner or group";
+ break;
+ }
+ return -1;
+ }
+ }
+
+ /* Make sure that we aren't looking at a directory */
+
+ while (fstat(fd, &sb) < 0) {
+ /* highly unlikely */
+ if (errno != EINTR) {
+ cvtErrno();
+ if (created)
+ (void) unlink(npath);
+ (void) close(fd);
+ return -1;
+ }
+ }
+ if (S_ISDIR(sb.st_mode)) {
+ ghc_errtype = ERR_INAPPROPRIATETYPE;
+ ghc_errstr = "destination is a directory";
+ /* We can't have created it in this case. */
+ (void) close(fd);
+ return -1;
+ }
+
+ while(rename(opath, npath) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ if (created)
+ (void) unlink(npath);
+ (void) close(fd);
+ return -1;
+ }
+ }
+
+ close(fd);
+ return 0;
+}
+\end{code}
diff --git a/ghc/lib/cbits/seekFile.lc b/ghc/lib/cbits/seekFile.lc
new file mode 100644
index 0000000000..caff607018
--- /dev/null
+++ b/ghc/lib/cbits/seekFile.lc
@@ -0,0 +1,135 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1994
+%
+\subsection[seekFile.lc]{hSeek and hIsSeekable Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+StgInt
+seekFile(fp, whence, size, d)
+StgAddr fp;
+StgInt whence;
+StgInt size;
+StgByteArray d;
+{
+ struct stat sb;
+ long int offset;
+
+ /*
+ * We need to snatch the offset out of an MP_INT. The bits are there sans sign,
+ * which we pick up from our size parameter. If abs(size) is greater than 1,
+ * this integer is just too big.
+ */
+
+ switch (size) {
+ case -1:
+ offset = -*(StgInt *) d;
+ break;
+ case 0:
+ offset = 0;
+ break;
+ case 1:
+ offset = *(StgInt *) d;
+ break;
+ default:
+ ghc_errtype = ERR_INVALIDARGUMENT;
+ ghc_errstr = "offset out of range";
+ return -1;
+ }
+
+ /* Try to find out the file type & size for a physical file */
+ while (fstat(fileno((FILE *) fp), &sb) < 0) {
+ /* highly unlikely */
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ if (S_ISREG(sb.st_mode)) {
+ /* Verify that we are not seeking beyond end-of-file */
+ int posn;
+
+ switch (whence) {
+ case SEEK_SET:
+ posn = offset;
+ break;
+ case SEEK_CUR:
+ while ((posn = ftell((FILE *) fp)) == -1) {
+ /* the possibility seems awfully remote */
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ posn += offset;
+ break;
+ case SEEK_END:
+ posn = sb.st_size + offset;
+ break;
+ }
+ if (posn > sb.st_size) {
+ ghc_errtype = ERR_INVALIDARGUMENT;
+ ghc_errstr = "seek position beyond end of file";
+ return -1;
+ }
+ } else if (S_ISFIFO(sb.st_mode)) {
+ ghc_errtype = ERR_UNSUPPORTEDOPERATION;
+ ghc_errstr = "can't seek on a pipe";
+ return -1;
+ } else {
+ ghc_errtype = ERR_UNSUPPORTEDOPERATION;
+ ghc_errstr = "can't seek on a device";
+ return -1;
+ }
+ while (fseek((FILE *) fp, offset, whence) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ return 0;
+}
+
+StgInt
+seekFileP(fp)
+StgAddr fp;
+{
+ struct stat sb;
+
+ /* Try to find out the file type */
+ while (fstat(fileno((FILE *) fp), &sb) < 0) {
+ /* highly unlikely */
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ /* Regular files are okay */
+ if (S_ISREG(sb.st_mode)) {
+ return 1;
+ }
+ /* For now, everything else is not */
+ else {
+ return 0;
+ }
+}
+
+\end{code}
+
+
+
diff --git a/ghc/lib/cbits/setBuffering.lc b/ghc/lib/cbits/setBuffering.lc
new file mode 100644
index 0000000000..ffccf70ca0
--- /dev/null
+++ b/ghc/lib/cbits/setBuffering.lc
@@ -0,0 +1,123 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1994
+%
+\subsection[setBuffering.lc]{hSetBuffering Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#ifdef HAVE_TERMIOS_H
+#include <termios.h>
+#endif
+
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#define SB_NB (0)
+#define SB_LB (-1)
+#define SB_BB (-2)
+
+StgInt
+setBuffering(fp, size)
+StgAddr fp;
+StgInt size;
+{
+ int flags;
+ int input;
+ struct termios tio;
+
+ while ((flags = fcntl(fileno((FILE *) fp), F_GETFL)) < 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ flags &= O_ACCMODE;
+ input = flags == O_RDONLY || flags == O_RDWR;
+
+ switch (size) {
+ case SB_NB:
+ if (setvbuf((FILE *) fp, NULL, _IONBF, 0L) != 0) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ if (input && isatty(fileno((FILE *) fp))) {
+
+ /*
+ * Try to switch to CBREAK mode, or whatever they call it these days.
+ */
+
+ if (tcgetattr(fileno((FILE *) fp), &tio) < 0) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ tio.c_lflag &= ~ICANON;
+ tio.c_cc[VMIN] = 1;
+ tio.c_cc[VTIME] = 0;
+ if (tcsetattr(fileno((FILE *) fp), TCSANOW, &tio) < 0) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ return 0;
+ break;
+ case SB_LB:
+ if (setvbuf((FILE *) fp, NULL, _IOLBF, BUFSIZ) != 0) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ break;
+ case SB_BB:
+
+ /*
+ * We should actually peek at the buffer size in the stat struct, if there
+ * is one. Something to occupy us later, when we're bored.
+ */
+ size = BUFSIZ;
+ /* fall through */
+ default:
+ if (setvbuf((FILE *) fp, NULL, _IOFBF, size) != 0) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ break;
+ }
+ if (input && isatty(fileno((FILE *) fp))) {
+
+ /*
+ * Try to switch back to cooked mode.
+ */
+
+ if (tcgetattr(fileno((FILE *) fp), &tio) < 0) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ tio.c_lflag |= ICANON;
+ if (tcsetattr(fileno((FILE *) fp), TCSANOW, &tio) < 0) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ return 0;
+}
+
+\end{code}
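setBuffering drops a terminal in and out of canonical mode with tcgetattr/tcsetattr. A standalone sketch of the same toggle, assuming only POSIX termios (set_cbreak is a hypothetical helper, not part of stgio):

\begin{code}
/* Standalone sketch: put a terminal fd into "cbreak" (non-canonical,
   byte-at-a-time) input, or back into cooked mode. */
#include <termios.h>

static int
set_cbreak(int fd, int on)
{
    struct termios tio;

    if (tcgetattr(fd, &tio) < 0)
	return -1;
    if (on) {
	tio.c_lflag &= ~ICANON;	/* deliver input without waiting for '\n' */
	tio.c_cc[VMIN] = 1;	/* ... but block until at least one byte   */
	tio.c_cc[VTIME] = 0;
    } else {
	tio.c_lflag |= ICANON;	/* back to line-buffered ("cooked") input  */
    }
    return tcsetattr(fd, TCSANOW, &tio);
}
\end{code}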
diff --git a/ghc/lib/cbits/setCurrentDirectory.lc b/ghc/lib/cbits/setCurrentDirectory.lc
new file mode 100644
index 0000000000..96fdf59fa9
--- /dev/null
+++ b/ghc/lib/cbits/setCurrentDirectory.lc
@@ -0,0 +1,25 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1995
+%
+\subsection[setCurrentDirectory.lc]{setCurrentDirectory Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+StgInt
+setCurrentDirectory(path)
+StgByteArray path;
+{
+ while (chdir(path) != 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+ return 0;
+}
+
+\end{code}
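Every stub in this directory wraps its system call in the same retry-on-EINTR loop: restart the call if a signal interrupted it, and hand anything else back to the caller as a failure. A standalone sketch of the idiom applied to read(2) (read_retrying is a hypothetical name, not one of the stubs):

\begin{code}
/* Standalone sketch of the retry-on-EINTR convention used by these stubs. */
#include <errno.h>
#include <unistd.h>

static ssize_t
read_retrying(int fd, void *buf, size_t len)
{
    ssize_t n;

    while ((n = read(fd, buf, len)) < 0) {
	if (errno != EINTR)
	    return -1;		/* a genuine error */
	/* interrupted before any data arrived: just try again */
    }
    return n;			/* 0 on end-of-file, otherwise bytes read */
}
\end{code}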
diff --git a/ghc/lib/cbits/stgio.h b/ghc/lib/cbits/stgio.h
index 26f09ee3b5..791323769a 100644
--- a/ghc/lib/cbits/stgio.h
+++ b/ghc/lib/cbits/stgio.h
@@ -122,39 +122,4 @@ StgAddr toClockSec PROTO((StgInt, StgInt, StgInt, StgInt, StgInt, StgInt, StgInt
/* writeFile.lc */
StgInt writeFile PROTO((StgAddr, StgAddr, StgInt));
-/* SOCKET THINGS ALL TOGETHER: */
-
-#if 0
-LATER
-/* acceptSocket.lc */
-StgInt acceptSocket(I_ sockfd, A_ peer, A_ addrlen);
-
-/* bindSocket.lc */
-StgInt bindSocket(I_ sockfd, A_ myaddr, I_ addrlen, I_ isUnixDomain);
-
-/* connectSocket.lc */
-StgInt connectSocket(I_ sockfd, A_ servaddr, I_ addrlen, I_ isUnixDomain);
-
-/* createSocket.lc */
-StgInt createSocket(I_ family, I_ type, I_ protocol);
-
-/* getPeerName.lc */
-StgInt getPeerName(int sockfd, struct sockaddr *peer, int *namelen);
-
-/* getSockName.lc */
-StgInt getSockName(int sockfd, struct sockaddr *peer, int *namelen);
-
-/* listenSocket.lc */
-StgInt listenSocket(int sockfd, int backlog);
-
-/* readDescriptor.lc */
-StgInt readDescriptor(int fd, char *buf, int nbytes);
-
-/* shutdownSocket.lc */
-StgInt shutdownSocket(int sockfd, int how);
-
-/* writeDescriptor.lc */
-StgInt writeDescriptor(int fd, char *buf, int nbytes);
-#endif /* 0 */
-
#endif /* ! STGIO_H */
diff --git a/ghc/lib/cbits/system.lc b/ghc/lib/cbits/system.lc
new file mode 100644
index 0000000000..013f111ba6
--- /dev/null
+++ b/ghc/lib/cbits/system.lc
@@ -0,0 +1,65 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1995
+%
+\subsection[system.lc]{system Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+#ifdef HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#endif
+
+#ifdef HAVE_VFORK_H
+#include <vfork.h>
+#endif
+
+#ifdef HAVE_VFORK
+#define fork vfork
+#endif
+
+StgInt
+systemCmd(cmd)
+StgByteArray cmd;
+{
+ int pid;
+ int wstat;
+
+    while ((pid = fork()) < 0) {
+	/* don't fall into the child branch on a failed fork; retry on EINTR */
+	if (errno != EINTR) {
+	    cvtErrno();
+	    stdErrno();
+	    return -1;
+	}
+    }
+    if (pid == 0) {	/* the child */
+	execl("/bin/sh", "sh", "-c", cmd, NULL);
+	_exit(127);
+    }
+
+ while (waitpid(pid, &wstat, 0) < 0) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ }
+
+ if (WIFEXITED(wstat))
+ return WEXITSTATUS(wstat);
+ else if (WIFSIGNALED(wstat)) {
+ ghc_errtype = ERR_INTERRUPTED;
+ ghc_errstr = "system command interrupted";
+ }
+ else {
+ /* This should never happen */
+ ghc_errtype = ERR_OTHERERROR;
+ ghc_errstr = "internal error (process neither exited nor signalled)";
+ }
+ return -1;
+}
+
+\end{code}
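systemCmd reports the child's fate through the wait-status macros. A standalone sketch of the same fork/exec/waitpid shape and status decoding, minus the stubs' errno plumbing (run_command is a hypothetical name):

\begin{code}
/* Standalone sketch: run a shell command and map its wait status to an
   exit code; -1 if the child was killed by a signal or something failed. */
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int
run_command(const char *cmd)
{
    pid_t pid;
    int wstat;

    if ((pid = fork()) < 0)
	return -1;				/* fork failed */
    if (pid == 0) {				/* child */
	execl("/bin/sh", "sh", "-c", cmd, (char *) NULL);
	_exit(127);				/* exec failed */
    }
    if (waitpid(pid, &wstat, 0) < 0)		/* parent */
	return -1;
    if (WIFEXITED(wstat))
	return WEXITSTATUS(wstat);		/* normal termination */
    return -1;					/* terminated by a signal */
}
\end{code}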
diff --git a/ghc/lib/cbits/writeFile.lc b/ghc/lib/cbits/writeFile.lc
new file mode 100644
index 0000000000..6981bf128c
--- /dev/null
+++ b/ghc/lib/cbits/writeFile.lc
@@ -0,0 +1,38 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1994
+%
+\subsection[writeFile.lc]{hPutStr Runtime Support}
+
+\begin{code}
+
+#include "rtsdefs.h"
+#include "stgio.h"
+
+StgInt
+writeFile(buf, fp, bytes)
+StgAddr buf;
+StgAddr fp;
+StgInt bytes;
+{
+ int count;
+ char *p = (char *) buf;
+
+ if (bytes == 0)
+ return 0;
+
+ /* Disallow short writes */
+ while ((count = fwrite(p, 1, bytes, (FILE *) fp)) < bytes) {
+ if (errno != EINTR) {
+ cvtErrno();
+ stdErrno();
+ return -1;
+ }
+ bytes -= count;
+ p += count;
+ clearerr((FILE *) fp);
+ }
+
+ return 0;
+}
+
+\end{code}
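writeFile refuses short writes, advancing past whatever the library managed before an interruption. A standalone sketch of the same loop against a raw descriptor using write(2) (write_all is a hypothetical name, not one of the stubs):

\begin{code}
/* Standalone sketch: keep writing until the whole buffer has gone out,
   restarting after EINTR and advancing past any partial write. */
#include <errno.h>
#include <unistd.h>

static int
write_all(int fd, const char *buf, size_t len)
{
    while (len > 0) {
	ssize_t n = write(fd, buf, len);

	if (n < 0) {
	    if (errno == EINTR)
		continue;	/* interrupted: retry the same chunk */
	    return -1;		/* genuine error */
	}
	buf += n;		/* partial write: move past what went out */
	len -= (size_t) n;
    }
    return 0;
}
\end{code}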
diff --git a/ghc/lib/concurrent/Merge.hs b/ghc/lib/concurrent/Merge.hs
index 7d809927d8..2c2ae77465 100644
--- a/ghc/lib/concurrent/Merge.hs
+++ b/ghc/lib/concurrent/Merge.hs
@@ -18,7 +18,8 @@ module Merge
import Semaphore
-import PreludeGlaST
+import GHCbase
+import GHCio ( stThen )
import Concurrent ( forkIO )
max_buff_size = 1
@@ -65,10 +66,10 @@ suckIO branches_running buff@(tail_list,e) vs
waitQSem e >>
takeMVar tail_list >>= \ node ->
newEmptyMVar >>= \ next_node ->
- unsafeInterleavePrimIO (
- takeMVar next_node `thenPrimIO` \ (Right x) ->
- signalQSem e `seqPrimIO`
- returnPrimIO x) `thenPrimIO` \ next_node_val ->
+ unsafeInterleavePrimIO ( ioToPrimIO $
+ takeMVar next_node >>= \ x ->
+ signalQSem e >>
+ return x) `stThen` \ next_node_val ->
putMVar node (x:next_node_val) >>
putMVar tail_list next_node >>
suckIO branches_running buff xs
diff --git a/ghc/lib/concurrent/Parallel.hs b/ghc/lib/concurrent/Parallel.hs
index 6f8b2c422a..be828313ba 100644
--- a/ghc/lib/concurrent/Parallel.hs
+++ b/ghc/lib/concurrent/Parallel.hs
@@ -12,6 +12,7 @@ module Parallel (par, seq -- re-exported
) where
import GHCbase ( par, seq )
+import GHCerr ( parError )
#if defined(__GRANSIM__)
diff --git a/ghc/lib/prelude/GHCbase.hs b/ghc/lib/prelude/GHCbase.hs
index 8cb4cd9a36..5f48825ffc 100644
--- a/ghc/lib/prelude/GHCbase.hs
+++ b/ghc/lib/prelude/GHCbase.hs
@@ -14,6 +14,7 @@ import Ratio
import qualified GHCps ( packString, packCBytes, comparePS, unpackPS )
import qualified GHCio ( IOError )
import qualified Monad
+import GHCerr
infixr 0 `seq`, `par`, `fork`
@@ -85,14 +86,29 @@ instance Show PackedString where
---------------------------------------------------------------
data State a = S# (State# a)
+
data ForeignObj = ForeignObj ForeignObj#
+instance CCallable ForeignObj
+
#ifndef __PARALLEL_HASKELL__
data StablePtr a = StablePtr (StablePtr# a)
-#endif
-
instance CCallable (StablePtr a)
-instance CCallable ForeignObj
instance CReturnable (StablePtr a)
+#endif
+
+eqForeignObj :: ForeignObj -> ForeignObj -> Bool
+makeForeignObj :: Addr -> Addr -> PrimIO ForeignObj
+
+makeForeignObj (A# obj) (A# finaliser) = ST $ \ (S# s#) ->
+ case makeForeignObj# obj finaliser s# of
+ StateAndForeignObj# s1# fo# -> (ForeignObj fo#, S# s1#)
+
+eqForeignObj mp1 mp2
+ = unsafePerformPrimIO (_ccall_ eqForeignObj mp1 mp2) /= (0::Int)
+
+instance Eq ForeignObj where
+ p == q = eqForeignObj p q
+ p /= q = not (eqForeignObj p q)
#ifndef __PARALLEL_HASKELL__
@@ -104,8 +120,6 @@ makeStablePtr :: a -> PrimIO (StablePtr a)
deRefStablePtr :: StablePtr a -> PrimIO a
freeStablePtr :: StablePtr a -> PrimIO ()
-eqForeignObj :: ForeignObj -> ForeignObj -> Bool
-makeForeignObj :: Addr -> Addr -> PrimIO ForeignObj
performGC :: PrimIO ()
{-# INLINE deRefStablePtr #-}
@@ -122,17 +136,6 @@ deRefStablePtr (StablePtr sp#) = ST $ \ (S# rw1#) ->
freeStablePtr sp = _ccall_ freeStablePointer sp
-makeForeignObj (A# obj) (A# finaliser) = ST $ \ (S# s#) ->
- case makeForeignObj# obj finaliser s# of
- StateAndForeignObj# s1# fo# -> (ForeignObj fo#, S# s1#)
-
-eqForeignObj mp1 mp2
- = unsafePerformPrimIO (_ccall_ eqForeignObj mp1 mp2) /= (0::Int)
-
-instance Eq ForeignObj where
- p == q = eqForeignObj p q
- p /= q = not (eqForeignObj p q)
-
performGC = _ccall_GC_ StgPerformGarbageCollection
#endif /* !__PARALLEL_HASKELL__ */
@@ -185,7 +188,7 @@ instance Monad (ST s) where
{-# INLINE return #-}
{-# INLINE (>>) #-}
{-# INLINE (>>=) #-}
- return x = ST $ \ s -> (x, s)
+ return x = ST $ \ s@(S# _) -> (x, s)
m >> k = m >>= \ _ -> k
(ST m) >>= k
@@ -266,9 +269,9 @@ forkST (ST action) = ST $ \ s ->
let
(r, new_s) = action s
in
- new_s `_fork_` (r, s)
+ new_s `fork__` (r, s)
where
- _fork_ x y = case (fork# x) of { 0# -> parError#; _ -> y }
+ fork__ x y = case (fork# x) of { 0# -> parError; _ -> y }
#endif {- concurrent -}
@@ -1093,22 +1096,16 @@ seq, par, fork :: Eval a => a -> b -> b
{-# INLINE par #-}
{-# INLINE fork #-}
+#ifdef __CONCURRENT_HASKELL__
seq x y = case (seq# x) of { 0# -> parError; _ -> y }
par x y = case (par# x) of { 0# -> parError; _ -> y }
fork x y = case (fork# x) of { 0# -> parError; _ -> y }
+#else
+seq x y = y
+par x y = y
+fork x y = y
+#endif
----------------------------------------------------------------
--- HACK: Magic unfoldings not implemented for unboxed lists
--- Need to define a "build" to avoid undefined symbol
-
-build = error "GHCbase.build"
-augment = error "GHCbase.augment"
---{-# GENERATE_SPECS build a #-}
---build :: ((a -> [a] -> [a]) -> [a] -> [a]) -> [a]
---build g = g (:) []
-
-
----------------------------------------------------------------
-- string-support functions:
---------------------------------------------------------------
@@ -1212,32 +1209,6 @@ fputs stream (c : cs)
fputs stream cs -- (just does some casting stream)
---------------------------------------------------------------
--- Used for compiler-generated error message;
--- encoding saves bytes of string junk.
-
-absentErr, parError :: a
-irrefutPatError
- , noDefaultMethodError
- , noExplicitMethodError
- , nonExhaustiveGuardsError
- , patError
- , recConError
- , recUpdError :: String -> a
-
-absentErr = error "Oops! The program has entered an `absent' argument!\n"
-parError = error "Oops! Entered GHCbase.parError (a GHC bug -- please report it!)\n"
-
-irrefutPatError s = error ("irrefutPatError:"++s)
-noDefaultMethodError s = error ("noDefaultMethodError:"++s)
-noExplicitMethodError s = error ("noExplicitMethodError:"++s)
-nonExhaustiveGuardsError s = error ("nonExhaustiveGuardsError:"++s)
-
-patError msg
- = error__ (\ x -> _ccall_ PatErrorHdrHook x) ("Pattern-matching failed in: "++msg++"\n")
-recConError s = error ("recConError:"++s)
-recUpdError s = error ("recUpdError:"++s)
-
----------------------------------------------------------------
-- ******** defn of `_trace' using Glasgow IO *******
{-# GENERATE_SPECS _trace a #-}
diff --git a/ghc/lib/prelude/GHCerr.hs b/ghc/lib/prelude/GHCerr.hs
new file mode 100644
index 0000000000..202fee26be
--- /dev/null
+++ b/ghc/lib/prelude/GHCerr.hs
@@ -0,0 +1,66 @@
+{- The GHCerr module defines the code for the
+ wired-in error functions, which have a special
+ type in the compiler (with "open tyvars").
+
+ We cannot define these functions in a module where they might be
+ used (e.g., GHCbase), because the magical wired-in type will get
+ confused with what the typechecker figures out.
+-}
+module GHCerr where
+
+import GHCbase (error__)
+
+---------------------------------------------------------------
+-- HACK: Magic unfoldings not implemented for unboxed lists
+-- Need to define a "build" to avoid an undefined symbol;
+-- it lives in this module to avoid .hi proliferation.
+
+build = error "GHCbase.build"
+augment = error "GHCbase.augment"
+--{-# GENERATE_SPECS build a #-}
+--build :: ((a -> [a] -> [a]) -> [a] -> [a]) -> [a]
+--build g = g (:) []
+
+
+---------------------------------------------------------------
+cannon_fodder_to_avoid_empty__versions__ = (1::Int)
+
+-- Used for compiler-generated error message;
+-- encoding saves bytes of string junk.
+
+absentErr, parError :: a
+irrefutPatError
+ , noDefaultMethodError
+ , noExplicitMethodError
+ , nonExhaustiveGuardsError
+ , patError
+ , recConError
+ , recUpdError :: String -> a
+
+absentErr = error "Oops! The program has entered an `absent' argument!\n"
+parError = error "Oops! Entered GHCbase.parError (a GHC bug -- please report it!)\n"
+
+noDefaultMethodError s = error ("noDefaultMethodError:"++s)
+noExplicitMethodError s = error ("noExplicitMethodError:"++s)
+
+irrefutPatError s = patError__ (untangle s "irrefutable pattern")
+nonExhaustiveGuardsError s = patError__ (untangle s "non-exhaustive guards")
+patError s = patError__ (untangle s "pattern-matching")
+
+patError__ = error__ (\ x -> _ccall_ PatErrorHdrHook x)
+
+recConError s = error (untangle s "record constructor")
+recUpdError s = error (untangle s "record update")
+
+untangle coded in_str
+ = "In " ++ in_str
+ ++ (if null msg then "" else (": " ++ msg))
+ ++ "; at " ++ file
+ ++ ", line " ++ line
+ ++ "\n"
+ where
+ (file,line,msg)
+ = case (span not_bar coded) of { (f, (_:rest)) ->
+ case (span not_bar rest) of { (l, (_:m)) ->
+ (f,l,m) }}
+ not_bar c = c /= '|'
diff --git a/ghc/lib/prelude/Main.mc_hi b/ghc/lib/prelude/Main.mc_hi
new file mode 100644
index 0000000000..8ed9e1ae99
--- /dev/null
+++ b/ghc/lib/prelude/Main.mc_hi
@@ -0,0 +1,5 @@
+interface Main 1
+__exports__
+Main main (..)
+__declarations__
+Main.main :: GHCbase.IO Prelude.();
diff --git a/ghc/lib/prelude/Main.mg_hi b/ghc/lib/prelude/Main.mg_hi
new file mode 100644
index 0000000000..8ed9e1ae99
--- /dev/null
+++ b/ghc/lib/prelude/Main.mg_hi
@@ -0,0 +1,5 @@
+interface Main 1
+__exports__
+Main main (..)
+__declarations__
+Main.main :: GHCbase.IO Prelude.();
diff --git a/ghc/lib/prelude/Main.mp_hi b/ghc/lib/prelude/Main.mp_hi
new file mode 100644
index 0000000000..8ed9e1ae99
--- /dev/null
+++ b/ghc/lib/prelude/Main.mp_hi
@@ -0,0 +1,5 @@
+interface Main 1
+__exports__
+Main main (..)
+__declarations__
+Main.main :: GHCbase.IO Prelude.();
diff --git a/ghc/lib/prelude/Main.p_hi b/ghc/lib/prelude/Main.p_hi
new file mode 100644
index 0000000000..8ed9e1ae99
--- /dev/null
+++ b/ghc/lib/prelude/Main.p_hi
@@ -0,0 +1,5 @@
+interface Main 1
+__exports__
+Main main (..)
+__declarations__
+Main.main :: GHCbase.IO Prelude.();
diff --git a/ghc/lib/prelude/Prelude.hs b/ghc/lib/prelude/Prelude.hs
index 96bd471f86..7bf33a923f 100644
--- a/ghc/lib/prelude/Prelude.hs
+++ b/ghc/lib/prelude/Prelude.hs
@@ -63,6 +63,11 @@ module Prelude (
(,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(..),
(,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(..),
(,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(..),
+ (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(..),
+ (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(..),
+ (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(..),
+ (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(..),
+ (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,)(..),
-- Functions: (->)
Eq((==), (/=)),
Ord(compare, (<), (<=), (>=), (>), max, min),
@@ -72,7 +77,7 @@ module Prelude (
Eval(..{-seq, strict-}), seq, strict, -- NB: glasgow hack
Num((+), (-), (*), negate, abs, signum, fromInteger, fromInt{-partain-}),
Real(toRational),
- Integral(quot, rem, div, mod, quotRem, divMod, toInteger),
+ Integral(quot, rem, div, mod, quotRem, divMod, toInteger, toInt{-partain-}),
Fractional((/), recip, fromRational),
Floating(pi, exp, log, sqrt, (**), logBase, sin, cos, tan,
asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh),
@@ -189,12 +194,13 @@ class (Real a, Enum a) => Integral a where
quot, rem, div, mod :: a -> a -> a
quotRem, divMod :: a -> a -> (a,a)
toInteger :: a -> Integer
+ toInt :: a -> Int -- partain: Glasgow extension
n `quot` d = q where (q,r) = quotRem n d
n `rem` d = r where (q,r) = quotRem n d
n `div` d = q where (q,r) = divMod n d
n `mod` d = r where (q,r) = divMod n d
- divMod n d = if signum r == - signum d then (q-1, r+d) else qr
+ divMod n d = if signum r == negate (signum d) then (q-1, r+d) else qr
where qr@(q,r) = quotRem n d
class (Num a) => Fractional a where
@@ -254,7 +260,7 @@ class (RealFrac a, Floating a) => RealFloat a where
exponent x = if m == 0 then 0 else n + floatDigits x
where (m,n) = decodeFloat x
- significand x = encodeFloat m (- floatDigits x)
+ significand x = encodeFloat m (negate (floatDigits x))
where (m,_) = decodeFloat x
scaleFloat k x = encodeFloat m (n+k)
@@ -293,7 +299,7 @@ x ^ n | n > 0 = f x (n-1) x
_ ^ _ = error "Prelude.^: negative exponent"
(^^) :: (Fractional a, Integral b) => a -> b -> a
-x ^^ n = if n >= 0 then x^n else recip (x^(-n))
+x ^^ n = if n >= 0 then x^n else recip (x^(negate n))
fromIntegral :: (Integral a, Num b) => a -> b
fromIntegral = fromInteger . toInteger
@@ -306,7 +312,7 @@ atan2 y x = case (signum y, signum x) of
( 0, 1) -> 0
( 1, 0) -> pi/2
( 0,-1) -> pi
- (-1, 0) -> -pi/2
+ (-1, 0) -> (negate pi)/2
( _, 1) -> atan (y/x)
( _,-1) -> atan (y/x) + pi
( 0, 0) -> error "Prelude.atan2: atan2 of origin"
@@ -553,8 +559,8 @@ instance CCallable Int
instance CReturnable Int
instance Bounded Int where
- minBound = -2147483647 -- **********************
- maxBound = 2147483647 -- **********************
+ minBound = negate 2147483647 -- **********************
+ maxBound = 2147483647 -- **********************
instance Num Int where
(+) x y = plusInt x y
@@ -603,7 +609,7 @@ instance Integral Int where
--OLD: odd x = neInt (x `mod` 2) 0
toInteger (I# n#) = int2Integer# n# -- give back a full-blown Integer
--- toInt x = x
+ toInt x = x
instance Enum Int where
toEnum x = x
@@ -614,9 +620,9 @@ instance Enum Int where
#else
{-# INLINE enumFrom #-}
{-# INLINE enumFromTo #-}
- enumFrom x = _build (\ c _ ->
+ enumFrom x = build (\ c _ ->
let g x = x `c` g (x `plusInt` 1) in g x)
- enumFromTo x y = _build (\ c n ->
+ enumFromTo x y = build (\ c n ->
let g x = if x <= y then x `c` g (x `plusInt` 1) else n in g x)
#endif
enumFromThen m n = en' m (n `minusInt` m)
@@ -696,7 +702,7 @@ instance Num Integer where
in
if cmp ># 0# then 1
else if cmp ==# 0# then 0
- else -1
+ else (negate 1)
}
fromInteger x = x
@@ -720,7 +726,7 @@ instance Integral Integer where
-> (J# a3 s3 d3, J# a4 s4 d4)
-}
toInteger n = n
--- toInt (J# a s d) = case (integer2Int# a s d) of { n# -> I# n# }
+ toInt (J# a s d) = case (integer2Int# a s d) of { n# -> I# n# }
-- the rest are identical to the report default methods;
-- you get slightly better code if you let the compiler
@@ -731,7 +737,7 @@ instance Integral Integer where
n `mod` d = r where (q,r) = divMod n d
divMod n d = case (quotRem n d) of { qr@(q,r) ->
- if signum r == - signum d then (q - 1, r+d) else qr }
+ if signum r == negate (signum d) then (q - 1, r+d) else qr }
-- Case-ified by WDP 94/10
instance Enum Integer where
@@ -764,9 +770,9 @@ instance Num Float where
(*) x y = timesFloat x y
abs x | x >= 0.0 = x
| otherwise = negateFloat x
- signum x | x == 0.0 = 0
- | x > 0.0 = 1
- | otherwise = -1
+ signum x | x == 0.0 = 0
+ | x > 0.0 = 1
+ | otherwise = negate 1
fromInteger n = encodeFloat n 0
fromInt i = int2Float i
@@ -821,7 +827,7 @@ instance RealFrac Float where
if n >= 0 then
(fromInteger m * fromInteger b ^ n, 0.0)
else
- case (quotRem m (b^(-n))) of { (w,r) ->
+ case (quotRem m (b^(negate n))) of { (w,r) ->
(fromInteger w, encodeFloat r n)
}
}
@@ -862,7 +868,7 @@ instance RealFloat Float where
(m,n) -> if m == 0 then 0 else n + floatDigits x
significand x = case decodeFloat x of
- (m,_) -> encodeFloat m (- (floatDigits x))
+ (m,_) -> encodeFloat m (negate (floatDigits x))
scaleFloat k x = case decodeFloat x of
(m,n) -> encodeFloat m (n+k)
@@ -889,9 +895,9 @@ instance Num Double where
(*) x y = timesDouble x y
abs x | x >= 0.0 = x
| otherwise = negateDouble x
- signum x | x == 0.0 = 0
- | x > 0.0 = 1
- | otherwise = -1
+ signum x | x == 0.0 = 0
+ | x > 0.0 = 1
+ | otherwise = negate 1
fromInteger n = encodeFloat n 0
fromInt (I# n#) = case (int2Double# n#) of { d# -> D# d# }
@@ -954,7 +960,7 @@ instance RealFrac Double where
if n >= 0 then
(fromInteger m * fromInteger b ^ n, 0.0)
else
- case (quotRem m (b^(-n))) of { (w,r) ->
+ case (quotRem m (b^(negate n))) of { (w,r) ->
(fromInteger w, encodeFloat r n)
}
}
@@ -995,7 +1001,7 @@ instance RealFloat Double where
(m,n) -> if m == 0 then 0 else n + floatDigits x
significand x = case decodeFloat x of
- (m,_) -> encodeFloat m (- (floatDigits x))
+ (m,_) -> encodeFloat m (negate (floatDigits x))
scaleFloat k x = case decodeFloat x of
(m,n) -> encodeFloat m (n+k)
@@ -1138,6 +1144,18 @@ data (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u
= (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_
data (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_
= (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_
+data (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_
+ = (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_
+data (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_ h_
+ = (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_ h_
+data (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_ h_ i_
+ = (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_ h_ i_
+data (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_ h_ i_ j_
+ = (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_ h_ i_ j_
+data (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_ h_ i_ j_ k_
+ = (,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,) a b c d e f g h i j k l m n o p q r s t u v w x y z a_ b_ c_ d_ e_ f_ g_ h_ i_ j_ k_
+ -- if you add more tuples, you need to change the compiler, too
+ -- (it has a wired-in number: 37)
instance (Read a, Read b) => Read (a,b) where
readsPrec p = readParen False
diff --git a/ghc/lib/prelude/PreludeGlaST.hs b/ghc/lib/prelude/PreludeGlaST.hs
index 11f6cf7c5c..179b648b89 100644
--- a/ghc/lib/prelude/PreludeGlaST.hs
+++ b/ghc/lib/prelude/PreludeGlaST.hs
@@ -8,7 +8,7 @@ module PreludeGlaST (
MutableArray,
MutableByteArray,
PrimIO,
- Addr, Word,
+ Addr(..), Word(..),
CCallable(..), CReturnable(..),
boundsOfArray,
@@ -57,6 +57,7 @@ module PreludeGlaST (
returnPrimIO,
returnST,
returnStrictlyST,
+ runST,
primIOToIO,
ioToPrimIO,
sameMutableArray,
@@ -82,6 +83,12 @@ module PreludeGlaST (
writeFloatArray,
writeIntArray,
writeVar
+#ifndef __PARALLEL_HASKELL__
+ , makeStablePtr
+ , deRefStablePtr
+ , freeStablePtr
+ , performGC
+#endif
) where
import GHCbase
diff --git a/ghc/lib/required/List.hs b/ghc/lib/required/List.hs
index c89357bb90..40d1153865 100644
--- a/ghc/lib/required/List.hs
+++ b/ghc/lib/required/List.hs
@@ -18,11 +18,11 @@ delete = deleteBy (==)
deleteBy :: (a -> a -> Bool) -> a -> [a] -> [a]
deleteBy eq x [] = []
-deleteBy eq x (y:ys) = if x `eq` y then ys else deleteBy eq x ys
+deleteBy eq x (y:ys) = if x `eq` y then ys else y : deleteBy eq x ys
-- list difference (non-associative). In the result of xs \\ ys,
-- the first occurrence of each element of ys in turn (if any)
--- has been removed from xs. This (xs ++ ys) \\ xs == ys.
+-- has been removed from xs. Thus, (xs ++ ys) \\ xs == ys.
(\\) :: (Eq a) => [a] -> [a] -> [a]
(\\) = foldl (flip delete)
diff --git a/ghc/misc/spat-analysers/README b/ghc/misc/spat-analysers/README
deleted file mode 100644
index 9165665f30..0000000000
--- a/ghc/misc/spat-analysers/README
+++ /dev/null
@@ -1,22 +0,0 @@
-This directory (ghc/misc/spat-analysers) includes the source for
-"analysers" to use with the SPAT (SPARC Performance Analysis Tools?)
-system, which includes "shade", "shadow", and "spixtools".
-
-The analysers here are no actual use to you unless you have the SPAT
-system from Sun. Bob Cmelik was the ringleader at Sun for quite
-some time. The current person to contact about it is:
-
- John Rodriguez
- Sun Microsystems Laboratories, Inc.
- 2550 Garcia Avenue, MS 29-225
- Mountain View, CA 94043
- (415) 336-1709
- john.rodriguez@sun.com
-
-We are supplying these analysers so you can see the Cool Things you
-can do with them, in the hope that you will be motivated to build upon
-our work.
-
-Will Partain
-AQUA Project
-95/07/18
diff --git a/ghc/misc/spat-analysers/REGSTATS b/ghc/misc/spat-analysers/REGSTATS
deleted file mode 100644
index 8dc860dafe..0000000000
--- a/ghc/misc/spat-analysers/REGSTATS
+++ /dev/null
@@ -1,18 +0,0 @@
-Final:
-SpA 0 0
-SpB 0 0
-Hp 0 0
-HpLim 0 0
-SuA 0 0
-SuB 0 0
-UpdReg 0 0
-RetVecReg 0 0
-TagReg 0 0
-Ret1 0 0
-Ret2 0 0
-Ret3 0 0
-Ret4 0 0
-Ret5 0 0
-Ret6 0 0
-Ret7 0 0
-Ret8 0 0
diff --git a/ghc/misc/spat-analysers/StgRegAddrs.h b/ghc/misc/spat-analysers/StgRegAddrs.h
deleted file mode 100644
index 2aa81fc81e..0000000000
--- a/ghc/misc/spat-analysers/StgRegAddrs.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Produced from: nm -n */
-
-#define UpdReg 0x00b5098
-#define SpA 0x00b50a0
-#define SuA 0x00b50a8
-#define SuB 0x00b50b0
-#define SpB 0x00b50b8
-#define Ret1 0x00b50e8
-#define Ret2 0x00b50f0
-#define Ret3 0x00b50f8
-#define HpLim 0x00b5100
-#define Hp 0x00b5108
-#define RetVecReg 0x00b5110
-#define TagReg 0x00b5118
-#define Ret5 0x00b5120
-#define Ret4 0x00b5130
-#define Ret6 0x00b5138
-#define Ret7 0x00b5140
-#define Ret8 0x00b5148
diff --git a/ghc/misc/spat-analysers/icount.c b/ghc/misc/spat-analysers/icount.c
deleted file mode 100644
index e47bd11d73..0000000000
--- a/ghc/misc/spat-analysers/icount.c
+++ /dev/null
@@ -1,91 +0,0 @@
-#define VERSION "24-Jan-94"
-#define PROGNAME "ICount"
-
-#define SHADE
-
-#include <stdio.h>
-
-#include <IHASH.h>
-#include <ITYPES.h>
-#include <instr.h>
-#include <inames.h>
-
-#include <shade.h>
-#define TR_REGS
-#include <trace.h>
-#include <stdtr.h>
-#include <trctl.h>
-
-static long long info[NIHASH];
-
-#define STATS_FILE "ICNT"
-
-/* fwd decls */
-void print_results();
-
-#define CHECKPOINT 1000000 /* reporting frequency */
-static long countdown = CHECKPOINT;
-
-char *anal_usage = "";
-char *anal_version = VERSION;
-
-initialize(argc,argv,envp)
- int argc;
- char **argv, envp;
-{
- unsigned i, j;
-
- /* Setup the trace */
- shade_trctl_trsize(sizeof(Trace));
-
- shade_trctl_it (IT_ANY, 1, 0, TC_IH);
-
- /* init table */
- for (j = 0; j < NIHASH; j++)
- info[j] = 0LL;
-}
-
-int analyze(argc,argv,envp)
- int argc;
- char **argv, envp;
-{
- Trace *tr;
- int i;
-
- for (i = 0; tr = shade_step(); i++) {
-
- info[tr->tr_ih] += 1LL;
-
- if (countdown-- < 0) {
- print_results("Intermediate:");
- countdown = CHECKPOINT;
- }
- }
- return(0);
-}
-
-void
-terminate()
-{
- print_results("Final:");
-}
-
-void
-print_results(header)
- char *header;
-{
- int i, j;
- static FILE *statf = NULL;
-
- if ((statf = fopen("ICNT", "w")) == NULL) {
- fprintf(stderr, "Cannot open statistics file ICNT\n");
- exit(1);
- }
- fprintf(statf, "%s\n\n", header);
-
- for (i = 0; i < NIHASH; i++) {
- fprintf(statf, "%8x: %8ld\n", i, (long) info[i]);
- }
-
- fclose(statf);
-}
diff --git a/ghc/misc/spat-analysers/icount_by_activity.c b/ghc/misc/spat-analysers/icount_by_activity.c
deleted file mode 100644
index 84daf6f8a9..0000000000
--- a/ghc/misc/spat-analysers/icount_by_activity.c
+++ /dev/null
@@ -1,396 +0,0 @@
-#define VERSION "24-Jan-95"
-#define PROGNAME "ICountByActivity"
-
-#define SHADE
-
-#include <stdio.h>
-
-#include <IHASH.h>
-#include <ITYPES.h>
-#include <instr.h>
-#include <inames.h>
-
-#include <shade.h>
-#define TR_REGS
-#include <trace.h>
-#include <stdtr.h>
-#include <trctl.h>
-
-int shade_run(Trace *, int);
-
-#define DO_SPAT_PROFILING
-#define __STG_USING_ULLONG__
-#include "stgdefs.h" /* GHC ticky-counting stuff */
-#define ACTIVITY_REG I_REG_g5
-#define SpA_REG I_REG_i0
-#define SpB_REG I_REG_i2
-#define Hp_REG I_REG_i4
-#define RET_REG I_REG_l0
-#define NODE_REG I_REG_l1
-#define INFO_REG I_REG_l2
-#define R3_REG I_REG_l3
-#define R7_REG I_REG_l7
-
-/* Activity register and current activity */
-
-#define EACT_CALL (ACTIVITIES+0)
-#define EACT_STKADJ (ACTIVITIES+1)
-#define EACT_ASTK (ACTIVITIES+2)
-#define EACT_BSTK (ACTIVITIES+3)
-#define EACT_RETREG (ACTIVITIES+4)
-#define EACT_ARGREGS (ACTIVITIES+5)
-
-#define EACT_TAILCALL (ACT_TAILCALL - ACT_BASE) /* use the TAILCALL slot */
-#define EACT_OVERHEAD (ACT_OVERHEAD - ACT_BASE) /* only used herein */
-
-#define EXTRA_ACTIVITIES 6
-
-#define TOTAL_ACTIVITIES (ACTIVITIES+EXTRA_ACTIVITIES)
-
-static ullong info[TOTAL_ACTIVITIES][NIHASH];
-/*static ullong annulled_insns = 0;*/
-
-#define STATS_FILE "ICNT_BY_ACTIVITY"
-
-/* fwd decls */
-void print_results(char *);
-void fprintf_ullong(FILE *, ullong);
-
-#define CHECKPOINT (1024*1024) /* reporting frequency */
-static long countdown = CHECKPOINT;
-
-char *anal_usage = "";
-char *anal_version = VERSION;
-
-void
-initialize(argc,argv,envp)
- int argc;
- char **argv, envp;
-{
- unsigned i, j;
-
- /* Setup the trace */
- shade_trctl_trsize(sizeof(Trace));
-
- shade_trctl_it (IT_ANY, 1, 0, TC_I | TC_IH);
- shade_trctl_ih (IH_OR, 1, 0, TC_I | TC_IH | TC_RD);
- shade_trctl_ih (IH_ADD, 1, 0, TC_I | TC_IH | TC_RD | TC_RS1);
- shade_trctl_ih (IH_SETHI, 1, 0, TC_I | TC_IH | TC_RD);
- shade_trctl_it (IT_LOAD, 1, 0, TC_I | TC_IH | TC_RD | TC_RS1);
- shade_trctl_it (IT_ILOAD, 1, 0, TC_I | TC_IH | TC_RD | TC_RS1);
- shade_trctl_it (IT_STORE, 1, 0, TC_I | TC_IH | TC_RD | TC_RS1);
- shade_trctl_it (IT_ISTORE,1, 0, TC_I | TC_IH | TC_RD | TC_RS1);
- /* trace all non-annulled instructions (... 1, 0, ...); For
- them, we want the instruction text (TC_I), and the hashed
- opcode (TC_IH). For "or" instructions, we also want the
- contents the destination register that was written into
- (TC_RD). Etc.
- */
-
- /* init table */
- for (i = 0; i < TOTAL_ACTIVITIES; i++)
- for (j = 0; j < NIHASH; j++)
- info[i][j] = 0LL;
-}
-
-int analyze(argc,argv,envp)
- int argc;
- char **argv, envp;
-{
- Trace *tr;
- ullong i;
- uint16 ih;
- int32 rd, rs1;
-
- unsigned activity = (ACT_UNKNOWN - ACT_BASE);
- ullong pending_sethi = 0LL;
- ullong pending_or = 0LL;
- ullong activity_chgs = 0LL;
- int acctd_for;
-#define ACCT_FOR() acctd_for = 1
-
- for (i = 0LL; tr = shade_step(); i += 1LL) {
- acctd_for = 0;
- ih = tr->tr_ih;
-
- if ( ih == IH_OR && tr->tr_i.i_rd == ACTIVITY_REG) {
- rd = tr->tr_rd;
-
- info[EACT_TAILCALL][IH_OR] += pending_or;
- if ( pending_sethi ) {
- fprintf(stderr, "pending_sethi still set!\n");
- }
-
- if (activity == (ACT_GC - ACT_BASE)) { /* only GC_STOP will stop it */
- if (rd == ACT_GC_STOP) {
- activity = ACT_UNKNOWN - ACT_BASE;
- info[EACT_OVERHEAD][IH_OR] += 1LL;
- ACCT_FOR();
- } else {
- info[activity][IH_OR] += 1LL;
- ACCT_FOR();
- }
- } else {
- if (rd < ACT_BASE || rd >= (ACT_BASE+ACTIVITIES)) {
- info[activity][IH_OR] += 1LL;
- ACCT_FOR();
- } else {
- activity = rd - ACT_BASE; /* reset! */
- info[EACT_OVERHEAD][IH_OR] += 1LL;
- ACCT_FOR();
- }
- }
- activity_chgs += 1LL;
- pending_sethi = 0LL;
- pending_or = 0LL;
- /* reset other things? */
-
- } else if ( activity != EACT_TAILCALL ) { /* ordinary instruction */
- info[activity][ih] += 1LL;
- ACCT_FOR();
-
- } else { /* TAILCALLing */
-/* fprintf(stderr, "op=%d\n", ih); */
-
- switch (ih) {
- case IH_SETHI:
-/* if ( pending_sethi ) {
- fprintf(stderr, "pending_sethi already set!\n");
- }
-*/ pending_sethi += 1LL;
- ACCT_FOR();
- break;
- case IH_JMPL:
- case IH_CALL:
- case IH_NOP:
- info[EACT_CALL][ih] += 1LL;
- info[EACT_CALL][IH_SETHI] += pending_sethi; /* really mystery? */
- info[EACT_CALL][IH_OR] += pending_or; /* ditto? */
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
- break;
-
- case IH_ADD:
- case IH_ADDCC:
- case IH_SUB:
- case IH_SUBCC:
- rd = tr->tr_i.i_rd;
- rs1 = tr->tr_i.i_rs1;
- if ( rd == NODE_REG || rd == INFO_REG ) {
- info[EACT_CALL][ih] += 1LL;
- info[EACT_CALL][IH_SETHI] += pending_sethi;
- info[EACT_CALL][IH_OR] += pending_or;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
-
- } else if (rd >= R3_REG && rd <= R7_REG) {
- info[EACT_ARGREGS][ih] += 1LL;
- info[EACT_ARGREGS][IH_SETHI] += pending_sethi;
- info[EACT_ARGREGS][IH_OR] += pending_or;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
-
- } else {
- info[EACT_TAILCALL][IH_SETHI] += pending_sethi;
- info[EACT_TAILCALL][IH_OR] += pending_or;
- pending_sethi = 0LL;
- pending_or = 0LL;
-
- if ( (rd == SpA_REG && rs1 == SpA_REG)
- || (rd == SpB_REG && rs1 == SpB_REG) ) {
- info[EACT_STKADJ][ih] += 1LL;
- ACCT_FOR();
-
- } else if ( rd >= I_REG_o0 && rd <= I_REG_o7 ) {
- info[EACT_TAILCALL][ih] += 1LL;
- ACCT_FOR();
-
- } else if ( rd == I_REG_g0
- && rs1 >= I_REG_o0 && rs1 <= I_REG_o7 ) {
- info[EACT_TAILCALL][ih] += 1LL;
- ACCT_FOR();
-
- } else if ( rd == I_REG_g3 && rs1 == I_REG_g3 ) {
- info[EACT_TAILCALL][ih] += 1LL;
- ACCT_FOR();
-
- } else {
- fprintf(stderr, "IH_ADD: mystery op (%d) rd=%d rs1=%d\n",
- ih, rd, rs1);
- }
- }
- break;
-
- case IH_OR:
- case IH_ORCC:
- rd = tr->tr_i.i_rd;
- if ( rd == RET_REG ) {
- info[EACT_RETREG][ih] += 1LL + pending_or;
- info[EACT_RETREG][IH_SETHI] += pending_sethi;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
-
- } else if ( rd == NODE_REG || rd == INFO_REG ) {
- info[EACT_CALL][ih] += 1LL + pending_or;
- info[EACT_CALL][IH_SETHI] += pending_sethi;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
-
- } else {
- pending_or += 1LL;
- ACCT_FOR();
- }
- break;
-
- case IH_LD:
- case IH_LDUB: /* ??? */
- case IH_ST:
- rs1 = tr->tr_i.i_rs1;
- if ( rs1 == SpA_REG ) {
- info[EACT_ASTK][ih] += 1LL;
- info[EACT_ASTK][IH_SETHI] += pending_sethi;
- info[EACT_ASTK][IH_OR] += pending_or;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
-
- } else if ( rs1 == SpB_REG ) {
- info[EACT_BSTK][ih] += 1LL;
- info[EACT_BSTK][IH_SETHI] += pending_sethi;
- info[EACT_BSTK][IH_OR] += pending_or;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
-
- } else if ( rs1 == NODE_REG ) {
- info[EACT_CALL][ih] += 1LL;
- info[EACT_CALL][IH_SETHI] += pending_sethi;
- info[EACT_CALL][IH_OR] += pending_or;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
-
- } else { /* random ld/st */
- info[EACT_TAILCALL][ih] += 1LL;
- info[EACT_TAILCALL][IH_SETHI] += pending_sethi;
- info[EACT_TAILCALL][IH_OR] += pending_or;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
- }
- break;
-
- case IH_AND: /* ??? */
- case IH_BA: /* ??? */
- case IH_BAA:
- case IH_BCC:
- case IH_BCS:
- case IH_BE:
- case IH_BGE:
- case IH_BL:
- case IH_BLA:
- case IH_BLEU:
- case IH_BNE:
- case IH_SLL:
- case IH_SRL:
- case IH_XOR:
- info[EACT_TAILCALL][ih] += 1LL;
- info[EACT_TAILCALL][IH_SETHI] += pending_sethi;
- info[EACT_TAILCALL][IH_OR] += pending_or;
- pending_sethi = 0LL;
- pending_or = 0LL;
- ACCT_FOR();
- break;
-
- default:
- fprintf(stderr, "mystery TAIL op = %d\n", ih);
- break;
- }
- }
-
- if (countdown-- < 0) {
- print_results("Intermediate:");
- countdown = CHECKPOINT;
- }
- if ( ! acctd_for ) {
- fprintf(stderr, "insn op=%d not acctd for!\n", ih);
- }
- }
- fprintf(stderr,"\n");
- fprintf_ullong(stderr,i);
- fprintf(stderr," iterations; ");
- fprintf_ullong(stderr,activity_chgs);
- fprintf(stderr," activity changes\n");
- return(0);
-}
-
-void
-terminate()
-{
- print_results("Final:");
-}
-
-void
-print_results(header)
- char *header;
-{
- int i, j;
- long total_slots = 0;
- ullong total_instrs = 0;
- static FILE *statf = NULL;
-
-/* fprintf(stderr, "Printing %s\n", header); */
-
- unlink(STATS_FILE);
- if ((statf = fopen(STATS_FILE, "w")) == NULL) {
- fprintf(stderr, "Cannot open statistics file %s\n",STATS_FILE);
- exit(1);
- }
- fprintf(statf, "%s\n\n", header);
-/* fprintf(statf, "annulled insns = ");
- fprintf_ullong(statf, annulled_insns);
-*/ fprintf(statf, "\n\n");
-
- for (i = 0; i < NIHASH; i++) {
- fprintf(statf, "%8d:", i);
- for (j = 0; j < TOTAL_ACTIVITIES; j++) {
- fprintf(statf, " ");
- fprintf_ullong(statf, info[j][i]);
- total_slots++;
- total_instrs += info[j][i];
- }
- fprintf(statf, "\n");
- }
- fprintf(statf, "total slots=%ld, total instructions=", total_slots);
- fprintf_ullong(statf, total_instrs);
- fprintf(statf, "\n");
-
- fclose(statf);
-}
-
-void
-fprintf_ullong(FILE *filep, ullong x)
-{
- if (x < (ullong)1000)
- fprintf(filep, "%ld", (I_)x);
- else if (x < (ullong)1000000)
- fprintf(filep, "%ld%3.3ld",
- (I_)((x)/(ullong)1000),
- (I_)((x)%(ullong)1000));
- else if (x < (ullong)1000000000)
- fprintf(filep, "%ld%3.3ld%3.3ld",
- (I_)((x)/(ullong)1000000),
- (I_)((x)/(ullong)1000%(ullong)1000),
- (I_)((x)%(ullong)1000));
- else
- fprintf(filep, "%ld%3.3ld%3.3ld%3.3ld",
- (I_)((x)/(ullong)1000000000),
- (I_)((x)/(ullong)1000000%(ullong)1000),
- (I_)((x)/(ullong)1000%(ullong)1000),
- (I_)((x)%(ullong)1000));
-}
diff --git a/ghc/misc/spat-analysers/makefile b/ghc/misc/spat-analysers/makefile
deleted file mode 100644
index 652f9df04d..0000000000
--- a/ghc/misc/spat-analysers/makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SHADE presumably set by an environment variable
-#
-SPIX = ${SHADE}/spixtools
-#
-SHADE_L = ${SHADE}/lib
-SPIX_L = ${SPIX}/lib
-
-SHADE_H = ${SHADE}/src/include
-SPIX_H = ${SPIX}/src/include
-GHC_H = ${bghc}/includes
-
-CC=gcc
-CFLAGS = -I${GHC_H} -I${SHADE_H} -I${SPIX_H} -g -O -ansi
-
-.c.o:
- ${CC} ${CFLAGS} -c $*.c
-
-.o:
- ${CC} -static -o $* spatmain.o $*.o ${SHADE_L}/libshade.a ${SPIX_L}/libspix.a
diff --git a/ghc/misc/spat-analysers/show_icounts b/ghc/misc/spat-analysers/show_icounts
deleted file mode 100644
index faca1f2a22..0000000000
--- a/ghc/misc/spat-analysers/show_icounts
+++ /dev/null
@@ -1,354 +0,0 @@
-#! /usr/local/bin/perl
-#
-%Datum = ();
-
-&init();
-
-$BigTotal = 0;
-
-while (<>) {
- chop;
- next if ! /^\s*(\d+): (.*)/;
- $op_code = $1;
- @num = split(/\s+/, $2);
-
- $op_category = $Opcode2Cat{$op_code};
-
- die "num = $#num\n" if $#num != 21;
-
- for($i = 0; $i <= $#num; $i++) {
- next if $num[$i] == 0;
-
- $act = $ActivityName[$i];
-
- $Datum{"$act/$op_category"} += $num[$i];
- $TotPerCat{$op_category} += $num[$i];
- $BigTotal += $num[$i];
- }
-}
-
-#print a header
-printf STDOUT "%12s", "";
-foreach $opcat (@ListOfCats) { printf STDOUT " %11s", $opcat; }
-printf STDOUT " %11s %s\n", 'TOTAL', " \%age";
-
-# print the collected goods
-%tot_for_opcat = ();
-foreach $act ( @ListOfActivities ) {
- printf STDOUT "%-12s", $act;
- $tot_for_act = 0;
-
- foreach $opcat (@ListOfCats) {
- $datum = $Datum{"$act/$opcat"};
- printf STDOUT " %11s", &commas($datum);
- $tot_for_act += $datum;
- $tot_for_opcat{$opcat} += $datum;
- }
- printf STDOUT "%12s %5.1f%%\n", &commas($tot_for_act), (($tot_for_act / $BigTotal) * 100.0);
-}
-
-foreach $k ( keys %TotPerCat ) {
- die "category ($k) totals don't match: $TotPerCat{$k} != $tot_for_opcat{$k}\n"
- if $TotPerCat{$k} != $tot_for_opcat{$k};
-}
-foreach $k ( keys %tot_for_opcat ) {
- die "category ($k) totals don't match: $TotPerCat{$k} != $tot_for_opcat{$k}\n"
- if $TotPerCat{$k} != $tot_for_opcat{$k};
-}
-
-#print totals by category and percentages
-printf STDOUT "\n%-12s", '*Totals*';
-$tot_to_chk = 0;
-foreach $opcat (@ListOfCats) {
- printf STDOUT " %11s", &commas($TotPerCat{$opcat});
- $tot_to_chk += $TotPerCat{$opcat};
-}
-
-printf STDOUT "%12s\n%-12s", &commas($BigTotal), '';
-
-die "Totals don't match: $tot_to_chk != $BigTotal\n" if $tot_to_chk != $BigTotal;
-
-foreach $opcat (@ListOfCats) {
- printf STDOUT " %10.1f%%", (($TotPerCat{$opcat} / $BigTotal) * 100.0);
-}
-printf STDOUT "\n";
-
-# utils:
-
-sub commas { # put commas into long integer numbers
- local($_) = @_;
-
- s/^\+//;
-
- s/(\d)(\d\d\d)$/$1,$2/;
- while ( /\d\d\d\d,/ ) {
- s/(\d)(\d\d\d)\,/$1,$2,/;
- }
- $_;
-}
-
-sub init {
- # order is important!
- @ActivityName = ( 'UNKNOWN', 'GC', 'OTHER_REDN', 'ASTK_STUB',
- 'FILL_IN_HEAP', 'HEAP_CHK', 'RETURN',
- 'UPDATE', 'PUSH_UPDF', 'ARGS_CHK', 'UPDATE_PAP',
- 'INDIRECT', 'XXX_12', 'XXX_13', 'OVERHEAD', 'TAILCALL',
- 'CALL', 'STKADJ', 'ASTK', 'BSTK', 'RETREG', 'ARGREGS' );
-
- @ListOfActivities = ( # print order
- 'ASTK_STUB', 'FILL_IN_HEAP', 'HEAP_CHK',
- 'RETURN', 'TAILCALL', 'UPDATE', 'PUSH_UPDF', 'UPDATE_PAP',
- 'INDIRECT', 'ARGS_CHK',
- 'CALL', 'STKADJ', 'ASTK', 'BSTK', 'RETREG', 'ARGREGS',
- 'OTHER_REDN', 'GC', 'UNKNOWN', 'OVERHEAD' );
-
- @ListOfCats = ('LD', 'ST', 'ARITH', 'BR', 'SETHI', 'NOP', 'OTHER'); # 'FL-PT',
- %Opcode2Cat = ();
-
- $Opcode2Cat{'0'} = 'ARITH'; # IH_ADD
- $Opcode2Cat{'1'} = 'ARITH'; # IH_ADDCC
- $Opcode2Cat{'2'} = 'ARITH'; # IH_ADDX
- $Opcode2Cat{'3'} = 'ARITH'; # IH_ADDXCC
- $Opcode2Cat{'4'} = 'ARITH'; # IH_AND
- $Opcode2Cat{'5'} = 'ARITH'; # IH_ANDCC
- $Opcode2Cat{'6'} = 'ARITH'; # IH_ANDN
- $Opcode2Cat{'7'} = 'ARITH'; # IH_ANDNCC
- $Opcode2Cat{'8'} = 'BR'; # IH_BA
- $Opcode2Cat{'9'} = 'BR'; # IH_BAA
- $Opcode2Cat{'10'} = 'BR'; # IH_BCC
- $Opcode2Cat{'11'} = 'BR'; # IH_BCCA
- $Opcode2Cat{'12'} = 'BR'; # IH_BCS
- $Opcode2Cat{'13'} = 'BR'; # IH_BCSA
- $Opcode2Cat{'14'} = 'BR'; # IH_BE
- $Opcode2Cat{'15'} = 'BR'; # IH_BEA
- $Opcode2Cat{'16'} = 'BR'; # IH_BG
- $Opcode2Cat{'17'} = 'BR'; # IH_BGA
- $Opcode2Cat{'18'} = 'BR'; # IH_BGE
- $Opcode2Cat{'19'} = 'BR'; # IH_BGEA
- $Opcode2Cat{'20'} = 'BR'; # IH_BGU
- $Opcode2Cat{'21'} = 'BR'; # IH_BGUA
- $Opcode2Cat{'22'} = 'BR'; # IH_BL
- $Opcode2Cat{'23'} = 'BR'; # IH_BLA
- $Opcode2Cat{'24'} = 'BR'; # IH_BLE
- $Opcode2Cat{'25'} = 'BR'; # IH_BLEA
- $Opcode2Cat{'26'} = 'BR'; # IH_BLEU
- $Opcode2Cat{'27'} = 'BR'; # IH_BLEUA
- $Opcode2Cat{'28'} = 'BR'; # IH_BN
- $Opcode2Cat{'29'} = 'BR'; # IH_BNA
- $Opcode2Cat{'30'} = 'BR'; # IH_BNE
- $Opcode2Cat{'31'} = 'BR'; # IH_BNEA
- $Opcode2Cat{'32'} = 'BR'; # IH_BNEG
- $Opcode2Cat{'33'} = 'BR'; # IH_BNEGA
- $Opcode2Cat{'34'} = 'BR'; # IH_BPOS
- $Opcode2Cat{'35'} = 'BR'; # IH_BPOSA
- $Opcode2Cat{'36'} = 'BR'; # IH_BVC
- $Opcode2Cat{'37'} = 'BR'; # IH_BVCA
- $Opcode2Cat{'38'} = 'BR'; # IH_BVS
- $Opcode2Cat{'39'} = 'BR'; # IH_BVSA
- $Opcode2Cat{'40'} = 'BR'; # IH_CALL
- $Opcode2Cat{'41'} = 'OTHER'; # IH_CB0
- $Opcode2Cat{'42'} = 'OTHER'; # IH_CB0A
- $Opcode2Cat{'43'} = 'OTHER'; # IH_CB01
- $Opcode2Cat{'44'} = 'OTHER'; # IH_CB01A
- $Opcode2Cat{'45'} = 'OTHER'; # IH_CB012
- $Opcode2Cat{'46'} = 'OTHER'; # IH_CB012A
- $Opcode2Cat{'47'} = 'OTHER'; # IH_CB013
- $Opcode2Cat{'48'} = 'OTHER'; # IH_CB013A
- $Opcode2Cat{'49'} = 'OTHER'; # IH_CB02
- $Opcode2Cat{'50'} = 'OTHER'; # IH_CB02A
- $Opcode2Cat{'51'} = 'OTHER'; # IH_CB023
- $Opcode2Cat{'52'} = 'OTHER'; # IH_CB023A
- $Opcode2Cat{'53'} = 'OTHER'; # IH_CB03
- $Opcode2Cat{'54'} = 'OTHER'; # IH_CB03A
- $Opcode2Cat{'55'} = 'OTHER'; # IH_CB1
- $Opcode2Cat{'56'} = 'OTHER'; # IH_CB1A
- $Opcode2Cat{'57'} = 'OTHER'; # IH_CB12
- $Opcode2Cat{'58'} = 'OTHER'; # IH_CB12A
- $Opcode2Cat{'59'} = 'OTHER'; # IH_CB123
- $Opcode2Cat{'60'} = 'OTHER'; # IH_CB123A
- $Opcode2Cat{'61'} = 'OTHER'; # IH_CB13
- $Opcode2Cat{'62'} = 'OTHER'; # IH_CB13A
- $Opcode2Cat{'63'} = 'OTHER'; # IH_CB2
- $Opcode2Cat{'64'} = 'OTHER'; # IH_CB2A
- $Opcode2Cat{'65'} = 'OTHER'; # IH_CB23
- $Opcode2Cat{'66'} = 'OTHER'; # IH_CB23A
- $Opcode2Cat{'67'} = 'OTHER'; # IH_CB3
- $Opcode2Cat{'68'} = 'OTHER'; # IH_CB3A
- $Opcode2Cat{'69'} = 'OTHER'; # IH_CBA
- $Opcode2Cat{'70'} = 'OTHER'; # IH_CBAA
- $Opcode2Cat{'71'} = 'OTHER'; # IH_CBN
- $Opcode2Cat{'72'} = 'OTHER'; # IH_CBNA
- $Opcode2Cat{'73'} = 'OTHER'; # IH_CPOP1
- $Opcode2Cat{'74'} = 'OTHER'; # IH_CPOP2
- $Opcode2Cat{'75'} = 'OTHER'; # 'FL-PT'; # IH_FABSS
- $Opcode2Cat{'76'} = 'OTHER'; # 'FL-PT'; # IH_FADDD
- $Opcode2Cat{'77'} = 'OTHER'; # 'FL-PT'; # IH_FADDQ
- $Opcode2Cat{'78'} = 'OTHER'; # 'FL-PT'; # IH_FADDS
- $Opcode2Cat{'79'} = 'OTHER'; # 'FL-PT'; # IH_FBA
- $Opcode2Cat{'80'} = 'OTHER'; # 'FL-PT'; # IH_FBAA
- $Opcode2Cat{'81'} = 'OTHER'; # 'FL-PT'; # IH_FBE
- $Opcode2Cat{'82'} = 'OTHER'; # 'FL-PT'; # IH_FBEA
- $Opcode2Cat{'83'} = 'OTHER'; # 'FL-PT'; # IH_FBG
- $Opcode2Cat{'84'} = 'OTHER'; # 'FL-PT'; # IH_FBGA
- $Opcode2Cat{'85'} = 'OTHER'; # 'FL-PT'; # IH_FBGE
- $Opcode2Cat{'86'} = 'OTHER'; # 'FL-PT'; # IH_FBGEA
- $Opcode2Cat{'87'} = 'OTHER'; # 'FL-PT'; # IH_FBL
- $Opcode2Cat{'88'} = 'OTHER'; # 'FL-PT'; # IH_FBLA
- $Opcode2Cat{'89'} = 'OTHER'; # 'FL-PT'; # IH_FBLE
- $Opcode2Cat{'90'} = 'OTHER'; # 'FL-PT'; # IH_FBLEA
- $Opcode2Cat{'91'} = 'OTHER'; # 'FL-PT'; # IH_FBLG
- $Opcode2Cat{'92'} = 'OTHER'; # 'FL-PT'; # IH_FBLGA
- $Opcode2Cat{'93'} = 'OTHER'; # 'FL-PT'; # IH_FBN
- $Opcode2Cat{'94'} = 'OTHER'; # 'FL-PT'; # IH_FBNA
- $Opcode2Cat{'95'} = 'OTHER'; # 'FL-PT'; # IH_FBNE
- $Opcode2Cat{'96'} = 'OTHER'; # 'FL-PT'; # IH_FBNEA
- $Opcode2Cat{'97'} = 'OTHER'; # 'FL-PT'; # IH_FBO
- $Opcode2Cat{'98'} = 'OTHER'; # 'FL-PT'; # IH_FBOA
- $Opcode2Cat{'99'} = 'OTHER'; # 'FL-PT'; # IH_FBU
- $Opcode2Cat{'100'} = 'OTHER'; # 'FL-PT'; # IH_FBUA
- $Opcode2Cat{'101'} = 'OTHER'; # 'FL-PT'; # IH_FBUE
- $Opcode2Cat{'102'} = 'OTHER'; # 'FL-PT'; # IH_FBUEA
- $Opcode2Cat{'103'} = 'OTHER'; # 'FL-PT'; # IH_FBUG
- $Opcode2Cat{'104'} = 'OTHER'; # 'FL-PT'; # IH_FBUGA
- $Opcode2Cat{'105'} = 'OTHER'; # 'FL-PT'; # IH_FBUGE
- $Opcode2Cat{'106'} = 'OTHER'; # 'FL-PT'; # IH_FBUGEA
- $Opcode2Cat{'107'} = 'OTHER'; # 'FL-PT'; # IH_FBUL
- $Opcode2Cat{'108'} = 'OTHER'; # 'FL-PT'; # IH_FBULA
- $Opcode2Cat{'109'} = 'OTHER'; # 'FL-PT'; # IH_FBULE
- $Opcode2Cat{'110'} = 'OTHER'; # 'FL-PT'; # IH_FBULEA
- $Opcode2Cat{'111'} = 'OTHER'; # 'FL-PT'; # IH_FCMPD
- $Opcode2Cat{'112'} = 'OTHER'; # 'FL-PT'; # IH_FCMPED
- $Opcode2Cat{'113'} = 'OTHER'; # 'FL-PT'; # IH_FCMPEQ
- $Opcode2Cat{'114'} = 'OTHER'; # 'FL-PT'; # IH_FCMPES
- $Opcode2Cat{'115'} = 'OTHER'; # 'FL-PT'; # IH_FCMPQ
- $Opcode2Cat{'116'} = 'OTHER'; # 'FL-PT'; # IH_FCMPS
- $Opcode2Cat{'117'} = 'OTHER'; # 'FL-PT'; # IH_FDIVD
- $Opcode2Cat{'118'} = 'OTHER'; # 'FL-PT'; # IH_FDIVQ
- $Opcode2Cat{'119'} = 'OTHER'; # 'FL-PT'; # IH_FDIVS
- $Opcode2Cat{'120'} = 'OTHER'; # 'FL-PT'; # IH_FDMULQ
- $Opcode2Cat{'121'} = 'OTHER'; # 'FL-PT'; # IH_FDTOI
- $Opcode2Cat{'122'} = 'OTHER'; # 'FL-PT'; # IH_FDTOQ
- $Opcode2Cat{'123'} = 'OTHER'; # 'FL-PT'; # IH_FDTOS
- $Opcode2Cat{'124'} = 'OTHER'; # 'FL-PT'; # IH_FITOD
- $Opcode2Cat{'125'} = 'OTHER'; # 'FL-PT'; # IH_FITOQ
- $Opcode2Cat{'126'} = 'OTHER'; # 'FL-PT'; # IH_FITOS
- $Opcode2Cat{'127'} = 'OTHER'; # IH_FLUSH
- $Opcode2Cat{'128'} = 'OTHER'; # 'FL-PT'; # IH_FMOVS
- $Opcode2Cat{'129'} = 'OTHER'; # 'FL-PT'; # IH_FMULD
- $Opcode2Cat{'130'} = 'OTHER'; # 'FL-PT'; # IH_FMULQ
- $Opcode2Cat{'131'} = 'OTHER'; # 'FL-PT'; # IH_FMULS
- $Opcode2Cat{'132'} = 'OTHER'; # 'FL-PT'; # IH_FNEGS
- $Opcode2Cat{'133'} = 'OTHER'; # 'FL-PT'; # IH_FQTOD
- $Opcode2Cat{'134'} = 'OTHER'; # 'FL-PT'; # IH_FQTOI
- $Opcode2Cat{'135'} = 'OTHER'; # 'FL-PT'; # IH_FQTOS
- $Opcode2Cat{'136'} = 'OTHER'; # 'FL-PT'; # IH_FSMULD
- $Opcode2Cat{'137'} = 'OTHER'; # 'FL-PT'; # IH_FSQRTD
- $Opcode2Cat{'138'} = 'OTHER'; # 'FL-PT'; # IH_FSQRTQ
- $Opcode2Cat{'139'} = 'OTHER'; # 'FL-PT'; # IH_FSQRTS
- $Opcode2Cat{'140'} = 'OTHER'; # 'FL-PT'; # IH_FSTOD
- $Opcode2Cat{'141'} = 'OTHER'; # 'FL-PT'; # IH_FSTOI
- $Opcode2Cat{'142'} = 'OTHER'; # 'FL-PT'; # IH_FSTOQ
- $Opcode2Cat{'143'} = 'OTHER'; # 'FL-PT'; # IH_FSUBD
- $Opcode2Cat{'144'} = 'OTHER'; # 'FL-PT'; # IH_FSUBQ
- $Opcode2Cat{'145'} = 'OTHER'; # 'FL-PT'; # IH_FSUBS
- $Opcode2Cat{'146'} = 'BR'; # IH_JMPL
- $Opcode2Cat{'147'} = 'LD'; # IH_LD
- $Opcode2Cat{'148'} = 'LD'; # IH_LDA
- $Opcode2Cat{'149'} = 'LD'; # IH_LDC
- $Opcode2Cat{'150'} = 'LD'; # IH_LDCSR
- $Opcode2Cat{'151'} = 'LD'; # IH_LDD
- $Opcode2Cat{'152'} = 'LD'; # IH_LDDA
- $Opcode2Cat{'153'} = 'LD'; # IH_LDDC
- $Opcode2Cat{'154'} = 'LD'; # IH_LDDF
- $Opcode2Cat{'155'} = 'LD'; # IH_LDF
- $Opcode2Cat{'156'} = 'LD'; # IH_LDFSR
- $Opcode2Cat{'157'} = 'LD'; # IH_LDSB
- $Opcode2Cat{'158'} = 'LD'; # IH_LDSBA
- $Opcode2Cat{'159'} = 'LD'; # IH_LDSH
- $Opcode2Cat{'160'} = 'LD'; # IH_LDSHA
- $Opcode2Cat{'161'} = 'LD'; # IH_LDSTUB
- $Opcode2Cat{'162'} = 'LD'; # IH_LDSTUBA
- $Opcode2Cat{'163'} = 'LD'; # IH_LDUB
- $Opcode2Cat{'164'} = 'LD'; # IH_LDUBA
- $Opcode2Cat{'165'} = 'LD'; # IH_LDUH
- $Opcode2Cat{'166'} = 'LD'; # IH_LDUHA
- $Opcode2Cat{'167'} = 'ARITH'; # IH_MULSCC
- $Opcode2Cat{'168'} = 'NOP'; # IH_NOP
- $Opcode2Cat{'169'} = 'ARITH'; # IH_OR
- $Opcode2Cat{'170'} = 'ARITH'; # IH_ORCC
- $Opcode2Cat{'171'} = 'ARITH'; # IH_ORN
- $Opcode2Cat{'172'} = 'ARITH'; # IH_ORNCC
- $Opcode2Cat{'173'} = 'OTHER'; # IH_RDASR
- $Opcode2Cat{'174'} = 'OTHER'; # IH_RDPSR
- $Opcode2Cat{'175'} = 'OTHER'; # IH_RDTBR
- $Opcode2Cat{'176'} = 'OTHER'; # IH_RDWIM
- $Opcode2Cat{'177'} = 'OTHER'; # IH_RDY
- $Opcode2Cat{'178'} = 'OTHER'; # IH_RESTORE
- $Opcode2Cat{'179'} = 'OTHER'; # IH_RETT
- $Opcode2Cat{'180'} = 'OTHER'; # IH_SAVE
- $Opcode2Cat{'181'} = 'ARITH'; # IH_SDIV
- $Opcode2Cat{'182'} = 'ARITH'; # IH_SDIVCC
- $Opcode2Cat{'183'} = 'SETHI'; # IH_SETHI
- $Opcode2Cat{'184'} = 'ARITH'; # IH_SLL
- $Opcode2Cat{'185'} = 'ARITH'; # IH_SMUL
- $Opcode2Cat{'186'} = 'ARITH'; # IH_SMULCC
- $Opcode2Cat{'187'} = 'ARITH'; # IH_SRA
- $Opcode2Cat{'188'} = 'ARITH'; # IH_SRL
- $Opcode2Cat{'189'} = 'ST'; # IH_ST
- $Opcode2Cat{'190'} = 'ST'; # IH_STA
- $Opcode2Cat{'191'} = 'ST'; # IH_STB
- $Opcode2Cat{'192'} = 'ST'; # IH_STBA
- $Opcode2Cat{'193'} = 'ST'; # IH_STBAR
- $Opcode2Cat{'194'} = 'ST'; # IH_STC
- $Opcode2Cat{'195'} = 'ST'; # IH_STCSR
- $Opcode2Cat{'196'} = 'ST'; # IH_STD
- $Opcode2Cat{'197'} = 'ST'; # IH_STDA
- $Opcode2Cat{'198'} = 'ST'; # IH_STDC
- $Opcode2Cat{'199'} = 'ST'; # IH_STDCQ
- $Opcode2Cat{'200'} = 'ST'; # IH_STDF
- $Opcode2Cat{'201'} = 'ST'; # IH_STDFQ
- $Opcode2Cat{'202'} = 'ST'; # IH_STF
- $Opcode2Cat{'203'} = 'ST'; # IH_STFSR
- $Opcode2Cat{'204'} = 'ST'; # IH_STH
- $Opcode2Cat{'205'} = 'ST'; # IH_STHA
- $Opcode2Cat{'206'} = 'ARITH'; # IH_SUB
- $Opcode2Cat{'207'} = 'ARITH'; # IH_SUBCC
- $Opcode2Cat{'208'} = 'ARITH'; # IH_SUBX
- $Opcode2Cat{'209'} = 'ARITH'; # IH_SUBXCC
- $Opcode2Cat{'210'} = 'OTHER'; # IH_SWAP
- $Opcode2Cat{'211'} = 'OTHER'; # IH_SWAPA
- $Opcode2Cat{'212'} = 'OTHER'; # IH_TA
- $Opcode2Cat{'213'} = 'OTHER'; # IH_TADDCC
- $Opcode2Cat{'214'} = 'OTHER'; # IH_TADDCCTV
- $Opcode2Cat{'215'} = 'OTHER'; # IH_TCC
- $Opcode2Cat{'216'} = 'OTHER'; # IH_TCS
- $Opcode2Cat{'217'} = 'OTHER'; # IH_TE
- $Opcode2Cat{'218'} = 'OTHER'; # IH_TG
- $Opcode2Cat{'219'} = 'OTHER'; # IH_TGE
- $Opcode2Cat{'220'} = 'OTHER'; # IH_TGU
- $Opcode2Cat{'221'} = 'OTHER'; # IH_TL
- $Opcode2Cat{'222'} = 'OTHER'; # IH_TLE
- $Opcode2Cat{'223'} = 'OTHER'; # IH_TLEU
- $Opcode2Cat{'224'} = 'OTHER'; # IH_TN
- $Opcode2Cat{'225'} = 'OTHER'; # IH_TNE
- $Opcode2Cat{'226'} = 'OTHER'; # IH_TNEG
- $Opcode2Cat{'227'} = 'OTHER'; # IH_TPOS
- $Opcode2Cat{'228'} = 'OTHER'; # IH_TSUBCC
- $Opcode2Cat{'229'} = 'OTHER'; # IH_TSUBCCTV
- $Opcode2Cat{'230'} = 'OTHER'; # IH_TVC
- $Opcode2Cat{'231'} = 'OTHER'; # IH_TVS
- $Opcode2Cat{'232'} = 'ARITH'; # IH_UDIV
- $Opcode2Cat{'233'} = 'ARITH'; # IH_UDIVCC
- $Opcode2Cat{'234'} = 'ARITH'; # IH_UMUL
- $Opcode2Cat{'235'} = 'ARITH'; # IH_UMULCC
- $Opcode2Cat{'236'} = 'OTHER'; # IH_UNIMP
- $Opcode2Cat{'237'} = 'OTHER'; # IH_WRASR
- $Opcode2Cat{'238'} = 'OTHER'; # IH_WRPSR
- $Opcode2Cat{'239'} = 'OTHER'; # IH_WRTBR
- $Opcode2Cat{'240'} = 'OTHER'; # IH_WRWIM
- $Opcode2Cat{'241'} = 'OTHER'; # IH_WRY
- $Opcode2Cat{'242'} = 'ARITH'; # IH_XNOR
- $Opcode2Cat{'243'} = 'ARITH'; # IH_XNORCC
- $Opcode2Cat{'244'} = 'ARITH'; # IH_XOR
- $Opcode2Cat{'245'} = 'ARITH'; # IH_XORCC
-}
diff --git a/ghc/misc/spat-analysers/spatmain.c b/ghc/misc/spat-analysers/spatmain.c
deleted file mode 100644
index 2c6ec1912a..0000000000
--- a/ghc/misc/spat-analysers/spatmain.c
+++ /dev/null
@@ -1,243 +0,0 @@
-#include <stdio.h>
-#include <varargs.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-
-#define TVTIME(tv) ((tv).tv_sec + (tv).tv_usec / 1e6)
-
-
-extern char *anal_usage, *anal_version,
- *shade_bench_path, *shade_ego, *shade_version,
- *shade_argtrange();
-
-extern char *ctime();
-extern int analyze();
-extern long time();
-extern void exit(), initialize(), terminate();
-
-
-FILE *statsfp; /* output stats file */
-double nina; /* # non-annulled instructions executed */
-
-
-static double usr, sys, real;
-static int t_flag,
- main_stats_analyze();
-static void main_stats_start(),
- main_stats_stop();
-
-
-int
-shade_main (argc, argv, envp)
- int argc;
- char **argv, **envp;
-{
- int aargc, ec, i, j, pid = getpid();
- char **aargv, *cmd = 0, *x;
-
- argc = shade_splitargs (argv, &aargv, &aargc);
-
- for (i = j = 1; i < argc; i++)
- if (argv[i][0] == '-' ||
- argv[i][0] == '+' && argv[i][1] == 't')
- switch (argv[i][1]) {
- case 'c':
- if (cmd)
- usage ("too many -c options");
- if (aargc > 0)
- usage ("-c not allowed with --");
- if (argv[i][2] || ++i >= argc)
- usage
- ("-c: missing/misplaced command");
- cmd = argv[i];
- break;
- case 'o':
- if (statsfp)
- shade_fatal ("too many -o's");
- if (argv[i][2] || ++i >= argc)
- usage
- ("-o: missing/misplaced file name");
- statsfp = fopen (argv[i], "w");
- if (!statsfp)
- usage ("%s: can't open", argv[i]);
- break;
- case 't':
- if (!t_flag++)
- (void) shade_argtrange (argv[i][0] ==
- '-' ? "+t," : "-t,");
- if (x = shade_argtrange (argv[i]))
- usage ("%s: %s", argv[i], x);
- /* should print tranges */
- break;
- case 'U':
- usage ("");
- return (0);
- case 'V':
- fprintf (stderr, "%s: version: %s\n",
- argv[0], anal_version);
- fprintf (stderr, "shade version: %s\n",
- shade_version);
- return (0);
- default:
- argv[j++] = argv[i];
- break;
- }
- else argv[j++] = argv[i];
-
- if (!statsfp)
- statsfp = stdout;
-
- argv[argc = j] = 0;
- initialize (argc, argv, envp);
-
- main_stats_start();
-
- if (cmd)
- ec = shade_sshell (cmd, main_stats_analyze);
- else if (aargc <= 0)
- ec = shade_shell (main_stats_analyze);
- else if (0 > shade_loadp (*aargv, aargv, envp))
- ec = 1;
- else ec = main_stats_analyze (aargc, aargv, envp, (char **) 0);
-
- if (pid == getpid()) {
- main_stats_stop();
- terminate();
- }
- return (ec);
-}
-
-
-usage (va_alist)
- va_dcl
-{
- char *fmt;
- va_list ap;
-
- va_start (ap);
- fmt = va_arg (ap, char *);
- if (fmt && *fmt) {
- fprintf (stderr, "%s: ", shade_ego);
- vfprintf (stderr, fmt, ap);
- fprintf (stderr, "\n\n");
- }
- va_end (ap);
-
- fprintf (stderr, "usage: %s [-U] [-V] [-o outfile] [+/-t[from],[to]] ",
- shade_ego);
- if (anal_usage && *anal_usage)
- fprintf (stderr, "\\\n\t%s ", anal_usage);
- fprintf (stderr, "\\\n\t[-c \"command\" | -- bench benchargs]\n");
-
- exit (1);
-}
-
-
-static void
-getcputime (usr, sys)
- double *usr, *sys;
-{
- struct rusage ru;
-
- if (-1 == getrusage (RUSAGE_SELF, &ru))
- *usr = *sys = 0.0;
- else {
- *usr = TVTIME (ru.ru_utime) - *usr;
- *sys = TVTIME (ru.ru_stime) - *sys;
- }
-}
-
-
-static void
-getrealtime (real)
- double *real;
-{
- struct timeval tv;
- struct timezone tz;
-
- tz.tz_dsttime = DST_NONE;
- tz.tz_minuteswest = 0;
-
- (void) gettimeofday (&tv, &tz);
-
- *real = TVTIME (tv) - *real;
-}
-
-
-static void
-main_stats_start()
-{
- long start;
-
- if (statsfp == 0)
- return;
-
- fprintf (statsfp, "Analyzer: %s\n", shade_ego);
- fprintf (statsfp, "Version: %s (shade version: %s)\n",
- anal_version, shade_version);
-
- {
- char host[64];
-
- if (-1 != gethostname (host, sizeof host))
- fprintf (statsfp, "Hostname: %s\n", host);
- }
-
- (void) time (&start);
- getrealtime (&real);
- getcputime (&usr, &sys);
-
- fprintf (statsfp, "Start: %s", ctime (&start));
- fflush (statsfp);
-}
-
-
-static int
-main_stats_analyze (argc, argv, envp, iov)
- int argc;
- char **argv, **envp, **iov;
-{
- int i;
-
- /* BUG: if t_flag, shouldn't change application program */
-
- if (statsfp) {
- fprintf (statsfp, "Application: %s", shade_bench_path);
- for (i = 1; i < argc; i++)
- fprintf (statsfp, " %s", argv[i]);
- if (iov)
- for (i = 0; iov[i]; i += 2) {
- fprintf (statsfp, " %s", iov[i]);
- if (iov[i+1])
- fprintf (statsfp, " %s", iov[i+1]);
- }
- fprintf (statsfp, "\n");
- fflush (statsfp);
- }
-
- return (analyze());
-}
-
-
-static void
-main_stats_stop()
-{
- long stop;
-
- if (statsfp == 0)
- return;
-
- (void) time (&stop);
- getcputime (&usr, &sys);
- getrealtime (&real);
-
- fprintf (statsfp, "Stop: %s", ctime (&stop));
- if (nina > 0)
- fprintf (statsfp, "Instructions: %.0f\n", nina);
- fprintf (statsfp, "Time: %.3f usr %.3f sys %.3f real %.3f%%\n",
- usr, sys, real,
- real > 0 ? 100. * (usr + sys) / real : 100.);
- if (usr + sys > 0 && nina > 0)
- fprintf (statsfp, "Speed: %.3f KIPS\n",
- nina / (usr + sys) / 1000.);
-}
diff --git a/ghc/misc/spat-analysers/stgregs.c b/ghc/misc/spat-analysers/stgregs.c
deleted file mode 100644
index 61d6204132..0000000000
--- a/ghc/misc/spat-analysers/stgregs.c
+++ /dev/null
@@ -1,121 +0,0 @@
-#include <stdio.h>
-#include <sparc.h>
-
-#include "StgRegAddrs.h"
-
-#define CHECKPOINT 1000000 /* reporting frequency */
-static int countdown = CHECKPOINT;
-
-struct regcount {
- char *str;
- int load;
- int store;
-} info[] = {
- {"SpA", 0, 0},
- {"SpB", 0, 0},
- {"Hp", 0, 0},
- {"HpLim", 0, 0},
- {"SuA", 0, 0},
- {"SuB", 0, 0},
- {"UpdReg", 0, 0},
- {"RetVecReg", 0, 0},
- {"TagReg", 0, 0},
- {"Ret1", 0, 0},
- {"Ret2", 0, 0},
- {"Ret3", 0, 0},
- {"Ret4", 0, 0},
- {"Ret5", 0, 0},
- {"Ret6", 0, 0},
- {"Ret7", 0, 0},
- {"Ret8", 0, 0},
- {0, 0, 0}
-};
-
-void
-printregs(msg)
-char *msg;
-{
- FILE *output;
- int i;
- if ((output = fopen("REGSTATS", "w")) == 0)
- syserr("cannot open statistics file REGSTATS\n");
-
- fprintf(output, "%s\n", msg);
- for (i = 0; info[i].str; i++) {
- fprintf(output, "%-16.16s %8d %8d\n",
- info[i].str, info[i].load, info[i].store);
- }
- fclose(output);
-}
-
-#define RECORD(i) \
- if ( (OP3(t->iw)&014) == 004) { \
- info[i].store++; \
- } else { \
- info[i].load++; \
- } \
- /* fprintf(stderr, "%s\n", info[i].str); */ \
- break
-
-void
-analyze (t, tend)
- TRACE *t, *tend;
-{
- countdown -= tend-t;
-
- for (; t < tend; t++) {
- if (OP(t->iw) == 3 && /* Load/store; (OP3(t->iw)&014)==004) => store */
- !(t->flags & ANNULLED)) {
- unsigned a = (unsigned)t->ea;
- switch (a) {
- case SpA:
- RECORD(0);
- case SpB:
- RECORD(1);
- case Hp:
- RECORD(2);
- case HpLim:
- RECORD(3);
- case SuA:
- RECORD(4);
- case SuB:
- RECORD(5);
- case UpdReg:
- RECORD(6);
- case RetVecReg:
- RECORD(7);
- case TagReg:
- RECORD(8);
- case Ret1:
- RECORD(9);
- case Ret2:
- RECORD(10);
- case Ret3:
- RECORD(11);
- case Ret4:
- RECORD(12);
- case Ret5:
- RECORD(13);
- case Ret6:
- RECORD(14);
- case Ret7:
- RECORD(15);
- case Ret8:
- RECORD(16);
-	  default:
- break;
- }
- }
- }
-
- if (countdown <= 0) {
- printregs("Intermediate:");
- countdown = CHECKPOINT;
- }
-}
-
-void
-terminate()
-{
- printregs("Final:");
-}
diff --git a/ghc/mkworld/GHC_OPTS b/ghc/mkworld/GHC_OPTS
index e0cc61c9a8..6c537e4c30 100644
--- a/ghc/mkworld/GHC_OPTS
+++ b/ghc/mkworld/GHC_OPTS
@@ -2,33 +2,63 @@
each build is compiled consistently
*/
-GHC_norm_OPTS=-O rts_or_lib(-optc-DGCap,)
-GHC_p_OPTS =-hisuf p_hi -O -prof -GPrelude rts_or_lib(-optc-DGCap,)
-GHC_t_OPTS =-hisuf t_hi -O -ticky -optc-DDEBUG rts_or_lib(-optc-DGCap,)
-GHC_u_OPTS =-hisuf u_hi -O -unregisterised ???? -ticky rts_or_lib(-optc-DGCap,)
-GHC_mc_OPTS=-hisuf mc_hi -O -concurrent rts_or_lib(-optc-DGCap,)
-GHC_mr_OPTS=-hisuf mr_hi -O -concurrent -prof -GPrelude rts_or_lib(-optc-DGCap,)
-GHC_mt_OPTS=-hisuf mt_hi -O -concurrent -ticky -optc-DDEBUG rts_or_lib(-optc-DGCap,)
-GHC_mp_OPTS=-hisuf mp_hi -O -parallel rts_or_lib(-optc-DGCap,)
-GHC_mg_OPTS=-hisuf mg_hi -O -gransim rts_or_lib(-optc-DGCap,)
-GHC_2s_OPTS=-hisuf 2s_hi -O -gc-2s rts_or_lib(-optc-DGC2s,)
-GHC_1s_OPTS=-hisuf 1s_hi -O -gc-1s rts_or_lib(-optc-DGC1s,)
-GHC_du_OPTS=-hisuf du_hi -O -gc-du rts_or_lib(-optc-DGCdu,)
-/* ToDo: mkworld-ify these user-way opts */
-GHC_a_OPTS =-hisuf a_hi -user-setup-a rts_or_lib(-optc-DGCap,)
-GHC_b_OPTS =-hisuf b_hi -user-setup-b rts_or_lib(-optc-DGCap,)
-GHC_c_OPTS =-hisuf c_hi -user-setup-c rts_or_lib(-optc-DGCap,)
-GHC_d_OPTS =-hisuf d_hi -user-setup-d rts_or_lib(-optc-DGCap,)
-GHC_e_OPTS =-hisuf e_hi -user-setup-e rts_or_lib(-optc-DGCap,)
-GHC_f_OPTS =-hisuf f_hi -user-setup-f rts_or_lib(-optc-DGCap,)
-GHC_g_OPTS =-hisuf g_hi -user-setup-g rts_or_lib(-optc-DGCap,)
-GHC_h_OPTS =-hisuf h_hi -user-setup-h rts_or_lib(-optc-DGCap,)
-GHC_i_OPTS =-hisuf i_hi -user-setup-i rts_or_lib(-optc-DGCap,)
-GHC_j_OPTS =-hisuf j_hi -user-setup-j rts_or_lib(-optc-DGCap,)
-GHC_k_OPTS =-hisuf k_hi -user-setup-k rts_or_lib(-optc-DGCap,)
-GHC_l_OPTS =-hisuf l_hi -user-setup-l rts_or_lib(-optc-DGCap,)
-GHC_m_OPTS =-hisuf m_hi -user-setup-m rts_or_lib(-optc-DGCap,)
-GHC_n_OPTS =-hisuf n_hi -user-setup-n rts_or_lib(-optc-DGCap,)
-GHC_o_OPTS =-hisuf o_hi -user-setup-o rts_or_lib(-optc-DGCap,)
-GHC_A_OPTS =-hisuf A_hi -user-setup-A rts_or_lib(-optc-DGCap,)
-GHC_B_OPTS =-hisuf B_hi -user-setup-B rts_or_lib(-optc-DGCap,)
+GHC_OPTS_norm=-O rts_or_lib(-optc-DGCap,)
+GHC_OPTS_p =-O -prof -GPrelude rts_or_lib(-optc-DGCap,) -hisuf p_hi -hisuf-prelude p_hi
+GHC_OPTS_t =-O -ticky -optc-DDEBUG rts_or_lib(-optc-DGCap,) -hisuf t_hi -hisuf-prelude t_hi
+GHC_OPTS_u =-O -unregisterised ???? -ticky rts_or_lib(-optc-DGCap,) -hisuf u_hi -hisuf-prelude u_hi
+GHC_OPTS_mc=-O -concurrent rts_or_lib(-optc-DGCap,) -hisuf mc_hi -hisuf-prelude mc_hi
+GHC_OPTS_mr=-O -concurrent -prof -GPrelude rts_or_lib(-optc-DGCap,) -hisuf mr_hi -hisuf-prelude mr_hi
+GHC_OPTS_mt=-O -concurrent -ticky -optc-DDEBUG rts_or_lib(-optc-DGCap,) -hisuf mt_hi -hisuf-prelude mt_hi
+GHC_OPTS_mp=-O -parallel rts_or_lib(-optc-DGCap,) -hisuf mp_hi -hisuf-prelude mp_hi
+GHC_OPTS_mg=-O -gransim rts_or_lib(-optc-DGCap,) -hisuf mg_hi -hisuf-prelude mg_hi
+GHC_OPTS_2s=-O -gc-2s rts_or_lib(-optc-DGC2s,) -hisuf 2s_hi -hisuf-prelude 2s_hi
+GHC_OPTS_1s=-O -gc-1s rts_or_lib(-optc-DGC1s,) -hisuf 1s_hi -hisuf-prelude 1s_hi
+GHC_OPTS_du=-O -gc-du rts_or_lib(-optc-DGCdu,) -hisuf du_hi -hisuf-prelude du_hi
+GHC_OPTS_a =-user-setup-a rts_or_lib(-optc-DGCap,) -hisuf a_hi -hisuf-prelude a_hi
+GHC_OPTS_b =-user-setup-b rts_or_lib(-optc-DGCap,) -hisuf b_hi -hisuf-prelude b_hi
+GHC_OPTS_c =-user-setup-c rts_or_lib(-optc-DGCap,) -hisuf c_hi -hisuf-prelude c_hi
+GHC_OPTS_d =-user-setup-d rts_or_lib(-optc-DGCap,) -hisuf d_hi -hisuf-prelude d_hi
+GHC_OPTS_e =-user-setup-e rts_or_lib(-optc-DGCap,) -hisuf e_hi -hisuf-prelude e_hi
+GHC_OPTS_f =-user-setup-f rts_or_lib(-optc-DGCap,) -hisuf f_hi -hisuf-prelude f_hi
+GHC_OPTS_g =-user-setup-g rts_or_lib(-optc-DGCap,) -hisuf g_hi -hisuf-prelude g_hi
+GHC_OPTS_h =-user-setup-h rts_or_lib(-optc-DGCap,) -hisuf h_hi -hisuf-prelude h_hi
+GHC_OPTS_i =-user-setup-i rts_or_lib(-optc-DGCap,) -hisuf i_hi -hisuf-prelude i_hi
+GHC_OPTS_j =-user-setup-j rts_or_lib(-optc-DGCap,) -hisuf j_hi -hisuf-prelude j_hi
+GHC_OPTS_k =-user-setup-k rts_or_lib(-optc-DGCap,) -hisuf k_hi -hisuf-prelude k_hi
+GHC_OPTS_l =-user-setup-l rts_or_lib(-optc-DGCap,) -hisuf l_hi -hisuf-prelude l_hi
+GHC_OPTS_m =-user-setup-m rts_or_lib(-optc-DGCap,) -hisuf m_hi -hisuf-prelude m_hi
+GHC_OPTS_n =-user-setup-n rts_or_lib(-optc-DGCap,) -hisuf n_hi -hisuf-prelude n_hi
+GHC_OPTS_o =-user-setup-o rts_or_lib(-optc-DGCap,) -hisuf o_hi -hisuf-prelude o_hi
+GHC_OPTS_A =-user-setup-A rts_or_lib(-optc-DGCap,) -hisuf A_hi -hisuf-prelude A_hi
+GHC_OPTS_B =-user-setup-B rts_or_lib(-optc-DGCap,) -hisuf B_hi -hisuf-prelude B_hi
+
+/* used in hslibs: */
+HC_OPTS_norm= $(GHC_OPTS_norm)
+HC_OPTS_p = $(GHC_OPTS_p)
+HC_OPTS_t = $(GHC_OPTS_t)
+HC_OPTS_u = $(GHC_OPTS_u)
+HC_OPTS_mc= $(GHC_OPTS_mc)
+HC_OPTS_mr= $(GHC_OPTS_mr)
+HC_OPTS_mt= $(GHC_OPTS_mt)
+HC_OPTS_mp= $(GHC_OPTS_mp)
+HC_OPTS_mg= $(GHC_OPTS_mg)
+HC_OPTS_2s= $(GHC_OPTS_2s)
+HC_OPTS_1s= $(GHC_OPTS_1s)
+HC_OPTS_du= $(GHC_OPTS_du)
+HC_OPTS_a = $(GHC_OPTS_a)
+HC_OPTS_b = $(GHC_OPTS_b)
+HC_OPTS_c = $(GHC_OPTS_c)
+HC_OPTS_d = $(GHC_OPTS_d)
+HC_OPTS_e = $(GHC_OPTS_e)
+HC_OPTS_f = $(GHC_OPTS_f)
+HC_OPTS_g = $(GHC_OPTS_g)
+HC_OPTS_h = $(GHC_OPTS_h)
+HC_OPTS_i = $(GHC_OPTS_i)
+HC_OPTS_j = $(GHC_OPTS_j)
+HC_OPTS_k = $(GHC_OPTS_k)
+HC_OPTS_l = $(GHC_OPTS_l)
+HC_OPTS_m = $(GHC_OPTS_m)
+HC_OPTS_n = $(GHC_OPTS_n)
+HC_OPTS_o = $(GHC_OPTS_o)
+HC_OPTS_A = $(GHC_OPTS_A)
+HC_OPTS_B = $(GHC_OPTS_B)
diff --git a/ghc/mkworld/macros-ghc.jm b/ghc/mkworld/macros-ghc.jm
index bbe0bca85e..18d33290a2 100644
--- a/ghc/mkworld/macros-ghc.jm
+++ b/ghc/mkworld/macros-ghc.jm
@@ -6,7 +6,7 @@ ExtraStuffToBeVeryClean(dir/fileroot.c dir/fileroot.h dir/fileroot.hs) @@\
dir/fileroot.c dir/CAT3(U_,fileroot,.hs): dir/fileroot.ugn @@\
$(RM) dir/fileroot.c dir/fileroot.hs dir/CAT3(U_,fileroot,.hs) dir/fileroot.h-SAVE @@\
if [ -f dir/fileroot.h ] ; then $(MV) -f dir/fileroot.h dir/fileroot.h-SAVE ; else exit 0 ; fi @@\
- $(UGEN) dir/fileroot.ugn || ( $(RM) dir/fileroot.h dir/fileroot.c dir/fileroot.hs; $(MV) -f dir/fileroot.h-SAVE dir/fileroot.h ) @@\
+ $(UGEN) dir/fileroot.ugn || ( $(RM) dir/fileroot.h dir/fileroot.c dir/CAT3(U_,fileroot,.hs); $(MV) -f dir/fileroot.h-SAVE dir/fileroot.h ) @@\
if cmp -s dir/fileroot.h-SAVE dir/fileroot.h ; then \ @@\
$(MV) -f dir/fileroot.h-SAVE dir/fileroot.h ; \ @@\
else \ @@\
diff --git a/ghc/mkworld/root.lit b/ghc/mkworld/root.lit
deleted file mode 100644
index 3c2a8cd7b3..0000000000
--- a/ghc/mkworld/root.lit
+++ /dev/null
@@ -1,10 +0,0 @@
-\section[mkworld-project-ghc]{``Make world'' files for the @ghc@ project}
-\downsection
-\input{site-ghc.ljm}
-\input{macros-ghc.ljm}
-\input{only4-ghc.ljm}
-\input{suffixes-ghc.ljm}
-\input{utils-ghc.ljm}
-\input{install-ghc.ljm}
-
-\upsection
diff --git a/ghc/runtime/Jmakefile b/ghc/runtime/Jmakefile
index bc4d74a425..713eda0cc3 100644
--- a/ghc/runtime/Jmakefile
+++ b/ghc/runtime/Jmakefile
@@ -72,7 +72,6 @@ RTS_LH = \
RTS_LC = \
c-as-asm/CallWrap_C.lc \
- c-as-asm/FreeMallocPtr.lc \
c-as-asm/HpOverflow.lc \
c-as-asm/StablePtr.lc \
c-as-asm/StablePtrOps.lc \
@@ -134,6 +133,7 @@ RTS_LHC = \
CLIB_LC = \
hooks/ErrorHdr.lc \
+ hooks/FreeForeignObj.lc \
hooks/OutOfHeap.lc \
hooks/OutOfStk.lc \
hooks/OutOfVM.lc \
@@ -203,11 +203,6 @@ CLIB_OBJS = $(CLIB_LC:.lc=.o)
* *
****************************************************************/
-#define CompileClibishly(file,flags) @@\
-CAT2(file,.o) : CAT2(file,.c) @@\
- $(RM) $@ @@\
- $(GHC) -c -o CAT2(file,.o) $(GHCFLAGS) flags CAT2(file,.c)
-
NormalLibraryTarget(HSclib,$(CLIB_OBJS))
ExtraStuffToClean($(CLIB_OBJS))
#if DoInstallGHCSystem == YES
@@ -215,17 +210,18 @@ InstallLibraryTarget(HSclib,$(INSTLIBDIR_GHC))
#endif
/* all .lc files, so far */
-CompileClibishly(hooks/ErrorHdr,)
-CompileClibishly(hooks/OutOfHeap,)
-CompileClibishly(hooks/OutOfStk,)
-CompileClibishly(hooks/OutOfVM,)
-CompileClibishly(hooks/NoRunnableThrds,)
-CompileClibishly(hooks/PatErrorHdr,)
-CompileClibishly(hooks/TraceHooks,)
-CompileClibishly(hooks/SizeHooks,)
-CompileClibishly(hooks/InitEachPE,)
-CompileClibishly(main/Mallocs,)
-CompileClibishly(main/TopClosure,)
+CompileCBitsly(GHC,hooks/ErrorHdr,)
+CompileCBitsly(GHC,hooks/FreeForeignObj,)
+CompileCBitsly(GHC,hooks/OutOfHeap,)
+CompileCBitsly(GHC,hooks/OutOfStk,)
+CompileCBitsly(GHC,hooks/OutOfVM,)
+CompileCBitsly(GHC,hooks/NoRunnableThrds,)
+CompileCBitsly(GHC,hooks/PatErrorHdr,)
+CompileCBitsly(GHC,hooks/TraceHooks,)
+CompileCBitsly(GHC,hooks/SizeHooks,)
+CompileCBitsly(GHC,hooks/InitEachPE,)
+CompileCBitsly(GHC,main/Mallocs,)
+CompileCBitsly(GHC,main/TopClosure,)
ExtraStuffToClean(main/TopClosure.o)
@@ -242,10 +238,10 @@ install :: main/TopClosure.o
# endif
AllTarget(gum/SysMan)
-gum/SysMan : gum/SysMan_mp.o gum/LLComms_mp.o main/Mallocs.o hooks/OutOfVM.o
+gum/SysMan : gum/SysMan.mp_o gum/LLComms.mp_o main/Mallocs.o hooks/OutOfVM.o
$(RM) $@
- $(CC) -o $@ gum/SysMan_mp.o gum/LLComms_mp.o main/Mallocs.o hooks/OutOfVM.o -L$$PVM_ROOT/lib/$$PVM_ARCH -lpvm3 -lgpvm3 __socket_libs
-ExtraStuffToClean(gum/SysMan_mp.o gum/SysMan)
+ $(CC) -o $@ gum/SysMan.mp_o gum/LLComms.mp_o main/Mallocs.o hooks/OutOfVM.o -L$$PVM_ROOT/lib/$$PVM_ARCH -lpvm3 -lgpvm3 __socket_libs
+ExtraStuffToClean(gum/SysMan.mp_o gum/SysMan)
# if DoInstallGHCSystem == YES
install :: gum/SysMan
$(INSTALL) -c $(INSTBINFLAGS) gum/SysMan $(INSTLIBDIR_GHC)/SysMan
@@ -261,29 +257,29 @@ install :: gum/SysMan
/* to build and install the per-build rts stuff */
#ifndef SpecialGhcRtsLibTarget
-#define SpecialGhcRtsLibTarget(tag,objs) @@\
-AllTarget(CAT3(libHSrts,tag,.a)) @@\
-ExtraStuffToClean(objs CAT3(libHSrts,tag,.a)) @@\
-CAT3(libHSrts,tag,.a):: $(H_FILES) objs @@\
- $(RM) $@ @@\
- $(AR) $@ objs @@\
+#define SpecialGhcRtsLibTarget(tag,objs) @@\
+AllTarget(CAT3(libHSrts,tag,.a)) @@\
+ExtraStuffToClean(objs CAT3(libHSrts,tag,.a)) @@\
+CAT3(libHSrts,tag,.a):: $(H_FILES) objs @@\
+ $(RM) $@ @@\
+ $(AR) $@ objs @@\
$(RANLIB) $@
#endif /* SpecialGhcRtsLibTarget */
#ifndef SpecialGhcRtsLibInstallTarget
#if DoInstallGHCSystem == YES
-#define SpecialGhcRtsLibInstallTarget(tag) @@\
-install :: CAT3(libHSrts,tag,.a) @@\
- $(INSTALL) $(INSTLIBFLAGS) \ @@\
- CAT3(libHSrts,tag,.a) \ @@\
- $(INSTLIBDIR_GHC)/CAT3(libHSrts,tag,.a) @@\
+#define SpecialGhcRtsLibInstallTarget(tag) @@\
+install :: CAT3(libHSrts,tag,.a) @@\
+ $(INSTALL) $(INSTLIBFLAGS) \ @@\
+ CAT3(libHSrts,tag,.a) \ @@\
+ $(INSTLIBDIR_GHC)/CAT3(libHSrts,tag,.a) @@\
$(RANLIB) $(INSTLIBDIR_GHC)/CAT3(libHSrts,tag,.a)
#else /* ! DoInstallGHC... */
#define SpecialGhcRtsLibInstallTarget(tag) /*nothing*/
#endif /* ! DoInstallGHC... */
#endif /* SpecialGhcRtsLibInstallTarget */
-#define BigBuildTarget(tag,objs) \
+#define BigBuildRtsTarget(tag,objs) \
SpecialGhcRtsLibTarget(tag,objs) @@\
SpecialGhcRtsLibInstallTarget(tag)
@@ -297,35 +293,35 @@ SpecialGhcRtsLibInstallTarget(tag)
MakeDirectories(install, $(INSTLIBDIR_GHC))
#endif /* DoInstallGHCSystem */
-IfBuild_normal(BigBuildTarget(,$(RTS_OBJS_norm)))
-IfBuild_p(BigBuildTarget(_p, $(RTS_OBJS_p)))
-IfBuild_t(BigBuildTarget(_t, $(RTS_OBJS_t)))
-IfBuild_u(BigBuildTarget(_u, $(RTS_OBJS_u)))
-IfBuild_mc(BigBuildTarget(_mc, $(RTS_OBJS_mc)))
-IfBuild_mr(BigBuildTarget(_mr, $(RTS_OBJS_mr)))
-IfBuild_mt(BigBuildTarget(_mt, $(RTS_OBJS_mt)))
-IfBuild_mp(BigBuildTarget(_mp, $(RTS_OBJS_mp)))
-IfBuild_mg(BigBuildTarget(_mg, $(RTS_OBJS_mg)))
-IfBuild_2s(BigBuildTarget(_2s, $(RTS_OBJS_2s)))
-IfBuild_1s(BigBuildTarget(_1s, $(RTS_OBJS_1s)))
-IfBuild_du(BigBuildTarget(_du, $(RTS_OBJS_du)))
-IfBuild_a(BigBuildTarget(_a, $(RTS_OBJS_a)))
-IfBuild_b(BigBuildTarget(_b, $(RTS_OBJS_b)))
-IfBuild_c(BigBuildTarget(_c, $(RTS_OBJS_c)))
-IfBuild_d(BigBuildTarget(_d, $(RTS_OBJS_d)))
-IfBuild_e(BigBuildTarget(_e, $(RTS_OBJS_e)))
-IfBuild_f(BigBuildTarget(_f, $(RTS_OBJS_f)))
-IfBuild_g(BigBuildTarget(_g, $(RTS_OBJS_g)))
-IfBuild_h(BigBuildTarget(_h, $(RTS_OBJS_h)))
-IfBuild_i(BigBuildTarget(_i, $(RTS_OBJS_i)))
-IfBuild_j(BigBuildTarget(_j, $(RTS_OBJS_j)))
-IfBuild_k(BigBuildTarget(_k, $(RTS_OBJS_k)))
-IfBuild_l(BigBuildTarget(_l, $(RTS_OBJS_l)))
-IfBuild_m(BigBuildTarget(_m, $(RTS_OBJS_m)))
-IfBuild_n(BigBuildTarget(_n, $(RTS_OBJS_n)))
-IfBuild_o(BigBuildTarget(_o, $(RTS_OBJS_o)))
-IfBuild_A(BigBuildTarget(_A, $(RTS_OBJS_A)))
-IfBuild_B(BigBuildTarget(_B, $(RTS_OBJS_B)))
+IfBuild_normal(BigBuildRtsTarget(,$(RTS_OBJS_norm)))
+IfBuild_p(BigBuildRtsTarget(_p, $(RTS_OBJS_p)))
+IfBuild_t(BigBuildRtsTarget(_t, $(RTS_OBJS_t)))
+IfBuild_u(BigBuildRtsTarget(_u, $(RTS_OBJS_u)))
+IfBuild_mc(BigBuildRtsTarget(_mc, $(RTS_OBJS_mc)))
+IfBuild_mr(BigBuildRtsTarget(_mr, $(RTS_OBJS_mr)))
+IfBuild_mt(BigBuildRtsTarget(_mt, $(RTS_OBJS_mt)))
+IfBuild_mp(BigBuildRtsTarget(_mp, $(RTS_OBJS_mp)))
+IfBuild_mg(BigBuildRtsTarget(_mg, $(RTS_OBJS_mg)))
+IfBuild_2s(BigBuildRtsTarget(_2s, $(RTS_OBJS_2s)))
+IfBuild_1s(BigBuildRtsTarget(_1s, $(RTS_OBJS_1s)))
+IfBuild_du(BigBuildRtsTarget(_du, $(RTS_OBJS_du)))
+IfBuild_a(BigBuildRtsTarget(_a, $(RTS_OBJS_a)))
+IfBuild_b(BigBuildRtsTarget(_b, $(RTS_OBJS_b)))
+IfBuild_c(BigBuildRtsTarget(_c, $(RTS_OBJS_c)))
+IfBuild_d(BigBuildRtsTarget(_d, $(RTS_OBJS_d)))
+IfBuild_e(BigBuildRtsTarget(_e, $(RTS_OBJS_e)))
+IfBuild_f(BigBuildRtsTarget(_f, $(RTS_OBJS_f)))
+IfBuild_g(BigBuildRtsTarget(_g, $(RTS_OBJS_g)))
+IfBuild_h(BigBuildRtsTarget(_h, $(RTS_OBJS_h)))
+IfBuild_i(BigBuildRtsTarget(_i, $(RTS_OBJS_i)))
+IfBuild_j(BigBuildRtsTarget(_j, $(RTS_OBJS_j)))
+IfBuild_k(BigBuildRtsTarget(_k, $(RTS_OBJS_k)))
+IfBuild_l(BigBuildRtsTarget(_l, $(RTS_OBJS_l)))
+IfBuild_m(BigBuildRtsTarget(_m, $(RTS_OBJS_m)))
+IfBuild_n(BigBuildRtsTarget(_n, $(RTS_OBJS_n)))
+IfBuild_o(BigBuildRtsTarget(_o, $(RTS_OBJS_o)))
+IfBuild_A(BigBuildRtsTarget(_A, $(RTS_OBJS_A)))
+IfBuild_B(BigBuildRtsTarget(_B, $(RTS_OBJS_B)))
/****************************************************************
@@ -334,46 +330,45 @@ IfBuild_B(BigBuildTarget(_B, $(RTS_OBJS_B)))
* *
****************************************************************/
-#define DoRtsFile(file,isuf,way,flags) @@\
+#define DoRtsFile(file,isuf,way,vsuf,flags) @@\
CAT3(file.,way,o) : CAT2(file,isuf) @@\
$(RM) $@ @@\
- $(GHC) -c -o CAT3(file.,way,o) $(GHCFLAGS) flags $(CAT4(GHC,_,way,OPTS)) CAT2(file,isuf)
-
-#define CompileRTSishly(file,isuf,flags) @@\
-IfBuild_normal(DoRtsFile(file,isuf,,flags)) \
-IfBuild_p(DoRtsFile(file,isuf,p_, flags)) \
-IfBuild_t(DoRtsFile(file,isuf,t_, flags)) \
-IfBuild_u(DoRtsFile(file,isuf,u_, flags)) \
-IfBuild_mc(DoRtsFile(file,isuf,mc_, flags)) \
-IfBuild_mr(DoRtsFile(file,isuf,mr_, flags)) \
-IfBuild_mt(DoRtsFile(file,isuf,mt_, flags)) \
-IfBuild_mp(DoRtsFile(file,isuf,mp_, flags)) \
-IfBuild_mg(DoRtsFile(file,isuf,mg_, flags)) \
-IfBuild_2s(DoRtsFile(file,isuf,2s_, flags)) \
-IfBuild_1s(DoRtsFile(file,isuf,1s_, flags)) \
-IfBuild_du(DoRtsFile(file,isuf,du_, flags)) \
-IfBuild_a(DoRtsFile(file,isuf,a_, flags)) \
-IfBuild_b(DoRtsFile(file,isuf,b_, flags)) \
-IfBuild_c(DoRtsFile(file,isuf,c_, flags)) \
-IfBuild_d(DoRtsFile(file,isuf,d_, flags)) \
-IfBuild_e(DoRtsFile(file,isuf,e_, flags)) \
-IfBuild_f(DoRtsFile(file,isuf,f_, flags)) \
-IfBuild_g(DoRtsFile(file,isuf,g_, flags)) \
-IfBuild_h(DoRtsFile(file,isuf,h_, flags)) \
-IfBuild_i(DoRtsFile(file,isuf,i_, flags)) \
-IfBuild_j(DoRtsFile(file,isuf,j_, flags)) \
-IfBuild_k(DoRtsFile(file,isuf,k_, flags)) \
-IfBuild_l(DoRtsFile(file,isuf,l_, flags)) \
-IfBuild_m(DoRtsFile(file,isuf,m_, flags)) \
-IfBuild_n(DoRtsFile(file,isuf,n_, flags)) \
-IfBuild_o(DoRtsFile(file,isuf,o_, flags)) \
-IfBuild_A(DoRtsFile(file,isuf,A_, flags)) \
-IfBuild_B(DoRtsFile(file,isuf,B_, flags))
+ $(GHC) -c -o CAT3(file.,way,o) $(GHCFLAGS) flags $(CAT3(GHC,_OPTS,vsuf)) CAT2(file,isuf)
+
+#define CompileRTSishly(file,isuf,flags) @@\
+IfBuild_normal(DoRtsFile(file,isuf,,_norm,flags)) \
+IfBuild_p(DoRtsFile(file,isuf,p_,_p, flags)) \
+IfBuild_t(DoRtsFile(file,isuf,t_,_t, flags)) \
+IfBuild_u(DoRtsFile(file,isuf,u_,_u, flags)) \
+IfBuild_mc(DoRtsFile(file,isuf,mc_,_mc, flags)) \
+IfBuild_mr(DoRtsFile(file,isuf,mr_,_mr, flags)) \
+IfBuild_mt(DoRtsFile(file,isuf,mt_,_mt, flags)) \
+IfBuild_mp(DoRtsFile(file,isuf,mp_,_mp, flags)) \
+IfBuild_mg(DoRtsFile(file,isuf,mg_,_mg, flags)) \
+IfBuild_2s(DoRtsFile(file,isuf,2s_,_2s, flags)) \
+IfBuild_1s(DoRtsFile(file,isuf,1s_,_1s, flags)) \
+IfBuild_du(DoRtsFile(file,isuf,du_,_du, flags)) \
+IfBuild_a(DoRtsFile(file,isuf,a_,_a, flags)) \
+IfBuild_b(DoRtsFile(file,isuf,b_,_b, flags)) \
+IfBuild_c(DoRtsFile(file,isuf,c_,_c, flags)) \
+IfBuild_d(DoRtsFile(file,isuf,d_,_d, flags)) \
+IfBuild_e(DoRtsFile(file,isuf,e_,_e, flags)) \
+IfBuild_f(DoRtsFile(file,isuf,f_,_f, flags)) \
+IfBuild_g(DoRtsFile(file,isuf,g_,_g, flags)) \
+IfBuild_h(DoRtsFile(file,isuf,h_,_h, flags)) \
+IfBuild_i(DoRtsFile(file,isuf,i_,_i, flags)) \
+IfBuild_j(DoRtsFile(file,isuf,j_,_j, flags)) \
+IfBuild_k(DoRtsFile(file,isuf,k_,_k, flags)) \
+IfBuild_l(DoRtsFile(file,isuf,l_,_l, flags)) \
+IfBuild_m(DoRtsFile(file,isuf,m_,_m, flags)) \
+IfBuild_n(DoRtsFile(file,isuf,n_,_n, flags)) \
+IfBuild_o(DoRtsFile(file,isuf,o_,_o, flags)) \
+IfBuild_A(DoRtsFile(file,isuf,A_,_A, flags)) \
+IfBuild_B(DoRtsFile(file,isuf,B_,_B, flags))
/* here we go: */
CompileRTSishly(c-as-asm/CallWrap_C,.c,)
-CompileRTSishly(c-as-asm/FreeMallocPtr,.c,)
CompileRTSishly(c-as-asm/HpOverflow,.c,)
CompileRTSishly(c-as-asm/PerformIO,.hc,-optc-DIN_GHC_RTS=1)
CompileRTSishly(c-as-asm/StablePtr,.c,)
diff --git a/ghc/runtime/c-as-asm/FreeMallocPtr.lc b/ghc/runtime/c-as-asm/FreeMallocPtr.lc
deleted file mode 100644
index 8dd9d03e0f..0000000000
--- a/ghc/runtime/c-as-asm/FreeMallocPtr.lc
+++ /dev/null
@@ -1,22 +0,0 @@
-\section[freemallocptr]{FreeMallocPtr}
-
-ToDo: obliterate -- SOF
-
-This is the default definition of FreeMallocPtr. It is a file by
-itself so that the linker can choose to ignore it if it has already
-seen a definition of FreeMallocPtr.
-
-\begin{code}
-#ifndef PAR
-
-#include "rtsdefs.h"
-
-void
-FreeMallocPtr (StgForeignObj mp)
-{
- fprintf(stderr, "Error: No deallocation routine for MallocPtr %lx\n", (W_) mp);
- EXIT(EXIT_FAILURE);
-}
-
-#endif /* not PAR */
-\end{code}
diff --git a/ghc/runtime/c-as-asm/HpOverflow.lc b/ghc/runtime/c-as-asm/HpOverflow.lc
index ed76a80dfe..8e013f9d77 100644
--- a/ghc/runtime/c-as-asm/HpOverflow.lc
+++ b/ghc/runtime/c-as-asm/HpOverflow.lc
@@ -102,7 +102,7 @@ RealPerformGC(liveness, reqsize, always_reenter_node, do_full_collection)
}
# endif
# if defined(GRAN)
- ReSchedule(SAME_THREAD); /* ToDo: Check HWL */
+ ReSchedule(SAME_THREAD);
# else
ReSchedule(1);
# endif
@@ -180,7 +180,7 @@ RealPerformGC(liveness, reqsize, always_reenter_node, do_full_collection)
GC_result = collectHeap(reqsize, &StorageMgrInfo, do_full_collection);
if ( GC_result == GC_HARD_LIMIT_EXCEEDED ) {
- OutOfHeapHook(reqsize * sizeof(W_)); /*msg*/
+ OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
shutdownHaskell();
EXIT(EXIT_FAILURE);
@@ -736,24 +736,31 @@ rtsBool do_full_collection;
PruneSparks();
# if defined(GRAN)
+ traverse_eventq_for_gc(); /* tidy up eventq for GC */
+
/* Store head and tail of runnable lists as roots for GC */
- for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
+ if (RTSflags.GranFlags.Light) {
+ StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[0];
+ StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[0];
+ } else {
+ for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
# if defined(GRAN_CHECK) && defined(GRAN)
- if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
- fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsHd %d (proc: %d) -- 0x%lx\n",
- num_ptr_roots,proc,RunnableThreadsHd[proc]);
+ if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
+ fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsHd %d (proc: %d) -- 0x%lx\n",
+ num_ptr_roots,proc,RunnableThreadsHd[proc]);
# endif
-
- StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[proc];
-
+
+ StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsHd[proc];
+
# if defined(GRAN_CHECK) && defined(GRAN)
- if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
- fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsTl %d (proc: %d) -- 0x%lx\n",
- num_ptr_roots,proc,RunnableThreadsTl[proc]);
+ if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
+ fprintf(RTSflags.GcFlags.statsFile,"Saving RunnableThreadsTl %d (proc: %d) -- 0x%lx\n",
+ num_ptr_roots,proc,RunnableThreadsTl[proc]);
# endif
- StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[proc];
-
- } /* forall proc ... */
+ StorageMgrInfo.roots[num_ptr_roots++] = RunnableThreadsTl[proc];
+
+ } /* forall proc ... */
+ } /* RTSflags.GranFlags.Light */
/* This is now done as part of collectHeap (see ../storage dir) */
/* num_ptr_roots = SaveSparkRoots(num_ptr_roots); */
@@ -797,7 +804,7 @@ rtsBool do_full_collection;
/* ====> The REAL THING happens here */
if (collectHeap(reqsize, &StorageMgrInfo, do_full_collection) != GC_SUCCESS) {
- OutOfHeapHook(reqsize * sizeof(W_)); /*msg*/
+ OutOfHeapHook(reqsize * sizeof(W_), RTSflags.GcFlags.heapSize * sizeof(W_)); /*msg*/
# if defined(TICKY_TICKY)
if (RTSflags.TickyFlags.showTickyStats) PrintTickyInfo();
@@ -842,27 +849,30 @@ rtsBool do_full_collection;
/* NB: PROC is unsigned datatype i.e. (PROC)-1 > 0 ! */
- for(proc = RTSflags.GranFlags.proc - 1;
- (proc >= 0) && (proc < RTSflags.GranFlags.proc) ;
- --proc) {
+ if (RTSflags.GranFlags.Light) {
+ RunnableThreadsTl[0] = StorageMgrInfo.roots[--num_ptr_roots] ;
+ RunnableThreadsHd[0] = StorageMgrInfo.roots[--num_ptr_roots] ;
+ } else {
+ for(proc = RTSflags.GranFlags.proc - 1;
+ (proc >= 0) && (proc < RTSflags.GranFlags.proc) ;
+ --proc) {
# if defined(GRAN_CHECK) && defined(GRAN)
- if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
- fprintf(RTSflags.GcFlags.statsFile,
- "Restoring RunnableThreadsTl %d (proc: %d) -- new: 0x%lx\n",
- num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
+ if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
+ fprintf(RTSflags.GcFlags.statsFile,
+ "Restoring RunnableThreadsTl %d (proc: %d) -- new: 0x%lx\n",
+ num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
# endif
-
- RunnableThreadsTl[proc] = StorageMgrInfo.roots[--num_ptr_roots];
-
+ RunnableThreadsTl[proc] = StorageMgrInfo.roots[--num_ptr_roots];
+
# if defined(GRAN_CHECK) && defined(GRAN)
- if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
- fprintf(RTSflags.GcFlags.statsFile,
- "Restoring RunnableThreadsHd %d (proc: %d) -- new: 0x%lx\n",
- num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
+ if ( RTSflags.GcFlags.giveStats && (RTSflags.GranFlags.debug & 0x40) )
+ fprintf(RTSflags.GcFlags.statsFile,
+ "Restoring RunnableThreadsHd %d (proc: %d) -- new: 0x%lx\n",
+ num_ptr_roots-1,proc,StorageMgrInfo.roots[num_ptr_roots-1]);
# endif
-
- RunnableThreadsHd[proc] = StorageMgrInfo.roots[--num_ptr_roots];
- } /* forall proc ... */
+ RunnableThreadsHd[proc] = StorageMgrInfo.roots[--num_ptr_roots];
+ } /* forall proc ... */
+ } /* RTSflags.GranFlags.Light */
# endif /* GRAN */
@@ -909,12 +919,3 @@ BlackHoleUpdateStack(STG_NO_ARGS)
}
#endif /* !CONCURRENT */
\end{code}
-
-
-\begin{code}
-#if 0 /* defined(CONCURRENT) && !defined(GRAN) */
-void
-PerformReschedule(W_ liveness, W_ always_reenter_node)
-{ }
-#endif
-\end{code}
diff --git a/ghc/runtime/gum/LLComms.lc b/ghc/runtime/gum/LLComms.lc
index 200ca3450f..3c9214083a 100644
--- a/ghc/runtime/gum/LLComms.lc
+++ b/ghc/runtime/gum/LLComms.lc
@@ -371,12 +371,12 @@ unsigned nPEs;
}
addr = WaitForPEOp(PP_PETIDS, ANY_GLOBAL_TASK);
GetArgs(buffer, nPEs);
-#if 0
for (i = 0; i < nPEs; ++i) {
PEs[i] = (GLOBAL_TASK_ID) buffer[i];
+#if 0
fprintf(stderr,"PEs[%d] = %x \n", i, PEs[i]);
- }
#endif
+ }
free(buffer);
return PEs;
}
diff --git a/ghc/runtime/gum/Pack.lc b/ghc/runtime/gum/Pack.lc
index f4f9572888..26891b8b11 100644
--- a/ghc/runtime/gum/Pack.lc
+++ b/ghc/runtime/gum/Pack.lc
@@ -153,7 +153,7 @@ W_ *packbuffersize;
ASSERT(packlocn <= RTSflags.ParFlags.packBufferSize);
*packbuffersize = packlocn;
# else /* GRAN */
- ASSERT(packlocn <= PackBuffer[PACK_SIZE_LOCN]);
+ ASSERT(packlocn <= PackBuffer[PACK_SIZE_LOCN]+PACK_HDR_SIZE);
/* ToDo: Print an earlier, more meaningful message */
if (packlocn==PACK_HDR_SIZE) { /* i.e. packet is empty */
fprintf(stderr,"EMPTY PACKET! Can't transfer closure %#lx at all!!\n",
diff --git a/ghc/runtime/gum/ParInit.lc b/ghc/runtime/gum/ParInit.lc
index 6f331e870b..277f5008c0 100644
--- a/ghc/runtime/gum/ParInit.lc
+++ b/ghc/runtime/gum/ParInit.lc
@@ -156,7 +156,7 @@ SynchroniseSystem(STG_NO_ARGS)
for (i = 0; i < nPEs; ++i)
registerTask(PEs[i]);
-/* pvm_notify( PvmTaskExit, PP_FAIL, 1, &SysManTask); /* Setup an error handler */
+/* pvm_notify( PvmTaskExit, PP_FAIL, 1, &SysManTask); /? Setup an error handler */
/* Initialise the PE task array? */
}
diff --git a/ghc/runtime/gum/RBH.lc b/ghc/runtime/gum/RBH.lc
index 5b94bee311..18fef5a22a 100644
--- a/ghc/runtime/gum/RBH.lc
+++ b/ghc/runtime/gum/RBH.lc
@@ -247,6 +247,17 @@ P_ closure;
/* Put back old info pointer (only in GrAnSim) -- HWL */
SET_INFO_PTR(closure, REVERT_INFOPTR(INFO_PTR(closure)));
+# if (defined(GCap) || defined(GCgn))
+ /* If we convert from an RBH in the old generation,
+ we have to make sure it goes on the mutables list */
+
+ if(closure <= StorageMgrInfo.OldLim) {
+ if (IS_MUTABLE(INFO_PTR(closure)) && MUT_LINK(closure) == MUT_NOT_LINKED) {
+ MUT_LINK(closure) = (W_) StorageMgrInfo.OldMutables;
+ StorageMgrInfo.OldMutables = closure;
+ }
+ }
+# endif
}
/* Remove closure from the mutables list */
@@ -268,9 +279,24 @@ UnlinkFromMUT(P_ closure)
MUT_LINK(prev) = MUT_LINK(curr);
MUT_LINK(curr) = MUT_NOT_LINKED;
}
+
+#if 0 && (defined(GCap) || defined(GCgn))
+ {
+ closq newclos;
+ extern closq ex_RBH_q;
+
+ newclos = (closq) stgMallocBytes(sizeof(struct clos), "UnlinkFromMUT");
+ CLOS_CLOSURE(newclos) = closure;
+ CLOS_PREV(newclos) = NULL;
+ CLOS_NEXT(newclos) = ex_RBH_q;
+ if (ex_RBH_q!=NULL)
+ CLOS_PREV(ex_RBH_q) = newclos;
+ ex_RBH_q = newclos;
+ }
+#endif
}
#endif /* PAR */
-#endif /* PAR -- whole file */
+#endif /* PAR || GRAN -- whole file */
\end{code}
diff --git a/ghc/runtime/gum/SysMan.lc b/ghc/runtime/gum/SysMan.lc
index bfe2e7d2e5..b1e9d13e26 100644
--- a/ghc/runtime/gum/SysMan.lc
+++ b/ghc/runtime/gum/SysMan.lc
@@ -112,7 +112,7 @@ DoGlobalGC(STG_NO_ARGS)
{}
/*
static void
-HandleException(STG_NO_ARGS)
+HandleException(PACKET p)
{}
*/
\end{code}
diff --git a/ghc/runtime/hooks/ErrorHdr.lc b/ghc/runtime/hooks/ErrorHdr.lc
index 87435f9901..d71a3a9b2e 100644
--- a/ghc/runtime/hooks/ErrorHdr.lc
+++ b/ghc/runtime/hooks/ErrorHdr.lc
@@ -2,8 +2,7 @@
#include "rtsdefs.h"
void
-ErrorHdrHook (where)
- FILE *where;
+ErrorHdrHook (FILE *where)
{
fprintf(where, "\nFail: ");
}
diff --git a/ghc/runtime/hooks/FreeForeignObj.lc b/ghc/runtime/hooks/FreeForeignObj.lc
new file mode 100644
index 0000000000..14dd4c92a8
--- /dev/null
+++ b/ghc/runtime/hooks/FreeForeignObj.lc
@@ -0,0 +1,18 @@
+\section{FreeForeignObj}
+
+ToDo: obliterate -- SOF
+
+This is the default definition of freeForeignObj. It is a file by
+itself so that the linker can choose to ignore it if it has already
+seen a definition of freeForeignObj.
+
+\begin{code}
+#include "rtsdefs.h"
+
+void
+freeForeignObj (StgForeignObj mp)
+{
+ fprintf(stderr, "Error: No deallocation routine for ForeignObj %lx\n", (W_) mp);
+ EXIT(EXIT_FAILURE);
+}
+\end{code}
diff --git a/ghc/runtime/hooks/NoRunnableThrds.lc b/ghc/runtime/hooks/NoRunnableThrds.lc
index 3ac6011aee..c9b351c4b8 100644
--- a/ghc/runtime/hooks/NoRunnableThrds.lc
+++ b/ghc/runtime/hooks/NoRunnableThrds.lc
@@ -5,7 +5,7 @@
#include "rtsdefs.h"
void
-NoRunnableThreadsHook ()
+NoRunnableThreadsHook (void)
{
fprintf(stderr, "No runnable threads!\n");
}
diff --git a/ghc/runtime/hooks/OutOfHeap.lc b/ghc/runtime/hooks/OutOfHeap.lc
index a1c6110489..8db9fa8e96 100644
--- a/ghc/runtime/hooks/OutOfHeap.lc
+++ b/ghc/runtime/hooks/OutOfHeap.lc
@@ -2,11 +2,8 @@
#include "rtsdefs.h"
void
-OutOfHeapHook (request_size)
- W_ request_size; /* in bytes */
+OutOfHeapHook (W_ request_size, W_ heap_size) /* both sizes in bytes */
{
- W_ heap_size = RTSflags.GcFlags.heapSize * sizeof(W_); /* i.e., in bytes */
-
fprintf(stderr, "Heap exhausted;\nwhile trying to allocate %lu bytes in a %lu-byte heap;\nuse `+RTS -H<size>' to increase the total heap size.\n",
request_size,
heap_size);
diff --git a/ghc/runtime/hooks/OutOfStk.lc b/ghc/runtime/hooks/OutOfStk.lc
index 470562117a..58a1a855fe 100644
--- a/ghc/runtime/hooks/OutOfStk.lc
+++ b/ghc/runtime/hooks/OutOfStk.lc
@@ -2,8 +2,7 @@
#include "rtsdefs.h"
void
-StackOverflowHook (stack_size)
- I_ stack_size; /* in bytes */
+StackOverflowHook (I_ stack_size) /* in bytes */
{
fprintf(stderr, "Stack space overflow: current size %ld bytes.\nUse `+RTS -Ksize' to increase it.\n", stack_size);
}
diff --git a/ghc/runtime/hooks/OutOfVM.lc b/ghc/runtime/hooks/OutOfVM.lc
index 60345320ec..a5a108e092 100644
--- a/ghc/runtime/hooks/OutOfVM.lc
+++ b/ghc/runtime/hooks/OutOfVM.lc
@@ -2,9 +2,7 @@
#include "rtsdefs.h"
void
-MallocFailHook (request_size, msg)
- I_ request_size; /* in bytes */
- char *msg;
+MallocFailHook (I_ request_size /* in bytes */, char *msg)
{
fprintf(stderr, "malloc: failed on request for %lu bytes; message: %s\n", request_size, msg);
}
diff --git a/ghc/runtime/hooks/PatErrorHdr.lc b/ghc/runtime/hooks/PatErrorHdr.lc
index 17062fb2db..5e175a49a8 100644
--- a/ghc/runtime/hooks/PatErrorHdr.lc
+++ b/ghc/runtime/hooks/PatErrorHdr.lc
@@ -2,8 +2,7 @@
#include "rtsdefs.h"
void
-PatErrorHdrHook (where)
- FILE *where;
+PatErrorHdrHook (FILE *where)
{
fprintf(where, "\nFail: ");
}
diff --git a/ghc/runtime/hooks/TraceHooks.lc b/ghc/runtime/hooks/TraceHooks.lc
index a64f9cfe61..67b7ea39c8 100644
--- a/ghc/runtime/hooks/TraceHooks.lc
+++ b/ghc/runtime/hooks/TraceHooks.lc
@@ -2,15 +2,13 @@
#include "rtsdefs.h"
void
-PreTraceHook (where)
- FILE *where;
+PreTraceHook (FILE *where)
{
fprintf(where, "Trace On:\n");
}
void
-PostTraceHook (where)
- FILE *where;
+PostTraceHook (FILE *where)
{
fprintf(where, "\nTrace Off.\n");
}
diff --git a/ghc/runtime/main/GranSim.lc b/ghc/runtime/main/GranSim.lc
index f8531aede7..41e7208737 100644
--- a/ghc/runtime/main/GranSim.lc
+++ b/ghc/runtime/main/GranSim.lc
@@ -2,7 +2,7 @@
% (c) The GRASP/AQUA Project, Glasgow University, 1995 - 1996
% Hans Wolfgang Loidl
%
-% Time-stamp: <Wed Jun 19 1996 16:38:25 Stardate: [-31]7683.25 hwloidl>
+% Time-stamp: <Thu Jul 25 1996 04:01:57 Stardate: [-31]7860.63 hwloidl>
%
%************************************************************************
%* *
@@ -77,6 +77,9 @@ char *proc_status_names[] = {
unsigned CurrentProc = 0;
rtsBool IgnoreEvents = rtsFalse; /* HACK only for testing */
+#if 0 && (defined(GCap) || defined(GCgn))
+closq ex_RBH_q = NULL;
+#endif
#endif /* GRAN */
\end{code}
@@ -440,6 +443,50 @@ grab_event() /* undo prepend_event i.e. get the event */
return (event);
}
+void
+traverse_eventq_for_gc()
+{
+ eventq event = EventHd;
+ W_ bufsize;
+ P_ closure, tso, buffer, bufptr;
+ PROC proc, creator;
+
+ /* Traverse eventq and replace every FETCHREPLY by a FETCHNODE for the
+ orig closure (root of packed graph). This means that a graph, which is
+ between processors at the time of GC is fetched again at the time when
+ it would have arrived, had there been no GC. Slightly inaccurate but
+ safe for GC.
+     This is only needed for GUM style fetching. */
+ if (!RTSflags.GranFlags.DoGUMMFetching)
+ return;
+
+ for(event = EventHd; event!=NULL; event=EVENT_NEXT(event)) {
+ if (EVENT_TYPE(event)==FETCHREPLY) {
+ buffer = EVENT_NODE(event);
+ ASSERT(buffer[PACK_FLAG_LOCN]==MAGIC_PACK_FLAG); /* It's a pack buffer */
+ bufsize = buffer[PACK_SIZE_LOCN];
+ closure= (P_)buffer[PACK_HDR_SIZE];
+ tso = (P_)buffer[PACK_TSO_LOCN];
+ proc = EVENT_PROC(event);
+ creator = EVENT_CREATOR(event); /* similar to unpacking */
+ for (bufptr=buffer+PACK_HDR_SIZE; bufptr<(buffer+bufsize);
+ bufptr++) {
+ if ( (INFO_TYPE(INFO_PTR(*bufptr)) == INFO_SPEC_RBH_TYPE) ||
+ (INFO_TYPE(INFO_PTR(*bufptr)) == INFO_GEN_RBH_TYPE) ) {
+ convertFromRBH((P_)*bufptr);
+ }
+ }
+ free(buffer);
+ EVENT_TYPE(event) = FETCHNODE;
+ EVENT_PROC(event) = creator;
+ EVENT_CREATOR(event) = proc;
+ EVENT_NODE(event) = closure;
+ EVENT_TSO(event) = tso;
+ EVENT_GC_INFO(event) = 0;
+ }
+ }
+}
+
void
print_event(event)
eventq event;
@@ -984,7 +1031,7 @@ I_ len;
fprintf(gr_file,"PE %2u [%s]: %-9s\t%lx\t%s\t[SN %u]\n",
/* using spark name as optional argument ^^^^^^ */
proc,time_string,gran_event_names[name],
- id,node_str,(len & NEW_SPARKNAME_MASK));
+ id,node_str,len);
break;
case GR_FETCH:
case GR_REPLY:
@@ -1035,8 +1082,8 @@ I_ len;
return;
id = tso == NULL ? -1 : TSO_ID(tso);
- if (node==Nil_closure)
- strcpy(node_str,"________"); /* "Nil_closure"); */
+ if (node==Prelude_Z91Z93_closure)
+ strcpy(node_str,"________"); /* "Z91Z93_closure"); */
else
sprintf(node_str,"0x%-6lx",node);
@@ -1068,7 +1115,7 @@ I_ len;
fprintf(gr_file,"PE %2u [%s]: %-9s\t%lx\t%s\t[SN %u]\n",
/* using spark name as optional argument ^^^^^^ */
proc,time_string,gran_event_names[name],
- id,node_str,(len & NEW_SPARKNAME_MASK));
+ id,node_str,len);
break;
default:
fprintf(stderr,"Error in DumpStartEventAt: event %s is not a START event\n",
diff --git a/ghc/runtime/main/RtsFlags.lc b/ghc/runtime/main/RtsFlags.lc
index 616c48f867..417cc7fd56 100644
--- a/ghc/runtime/main/RtsFlags.lc
+++ b/ghc/runtime/main/RtsFlags.lc
@@ -288,7 +288,7 @@ usage_text[] = {
" -O Disable output for performance measurement",
# endif /* PAR */
# ifdef GRAN /* ToDo: fill in decent Docu here */
-" -b... All GranSim options start with -b, and there are many of them",
+" -b... All GranSim options start with -b; see GranSim User's Guide for details",
# endif
#endif /* CONCURRENT */
"",
@@ -548,8 +548,13 @@ error = rtsTrue;
RTSflags.CcFlags.sortBy = rts_argv[arg][2];
break;
default:
+ PAR_BUILD_ONLY(
+ break; /* we do not care about sortBy for parallel */
+ )
+ PROFILING_BUILD_ONLY(
fprintf(stderr, "Invalid profiling sort option %s\n", rts_argv[arg]);
error = rtsTrue;
+ )
}
) break;
@@ -1243,6 +1248,17 @@ allowed\n",
/* features in connection with exploiting granularity */
/* information. I.e. if -bY is chosen these options */
/* tell the RTS what to do with the supplied info --HWL */
+
+ case 'W':
+ if (rts_argv[arg][3] != '\0') {
+ RTSflags.GranFlags.packBufferSize_internal = decode(rts_argv[arg]+3);
+ } else {
+ RTSflags.GranFlags.packBufferSize_internal = GRANSIM_DEFAULT_PACK_BUFFER_SIZE;
+ }
+ fprintf(stderr,"Size of GranSim internal pack buffer: %u.\n",
+ RTSflags.GranFlags.packBufferSize_internal);
+ break;
+
case 'X':
switch(rts_argv[arg][3]) {
@@ -1450,6 +1466,11 @@ allowed\n",
RTSflags.GranFlags.debug |= 0x4000;
break;
+ case 'P':
+ fprintf(stderr,"Debug pack buffer handling.\n");
+ RTSflags.GranFlags.debug |= 0x8000;
+ break;
+
case 's':
fprintf(stderr,"Debug spark-queue manipulations.\n");
RTSflags.GranFlags.debug |= 0x10000;
diff --git a/ghc/runtime/main/Signals.lc b/ghc/runtime/main/Signals.lc
index 82f749be14..bfc10c374e 100644
--- a/ghc/runtime/main/Signals.lc
+++ b/ghc/runtime/main/Signals.lc
@@ -428,6 +428,8 @@ more_handlers(I_ sig)
nHandlers = sig + 1;
}
+I_ nocldstop = 0;
+
# ifdef _POSIX_SOURCE
static void
@@ -470,8 +472,6 @@ unblockUserSignals(void)
}
-I_ nocldstop = 0;
-
I_
sig_install(sig, spi, mask)
I_ sig;
diff --git a/ghc/runtime/main/StgUpdate.lhc b/ghc/runtime/main/StgUpdate.lhc
index 5a229ecdb6..04d2a5c96b 100644
--- a/ghc/runtime/main/StgUpdate.lhc
+++ b/ghc/runtime/main/StgUpdate.lhc
@@ -391,7 +391,7 @@ STGFUN(Perm_Ind_entry)
/* Don't add INDs to granularity cost */
- /* Dont: ENT_IND(Node); for ticky-ticky; this ind is here only to help ticky */
+ /* Dont: ENT_IND(Node); for ticky-ticky; this ind is here only to help profiling */
/* Enter PAP cost centre -- lexical scoping only */
ENTER_CC_PAP_CL(Node);
@@ -479,7 +479,7 @@ STGFUN(UpdatePAP)
/* set "CC_pap" to go in the updatee (see Sansom thesis, p 183) */
CC_pap /*really cc_enter*/ = (CostCentre) CC_HDR(Node);
- if (IS_SUBSUMED_CC(CC_pap) /*really cc_enter*/)
+ if (IS_CAF_OR_DICT_OR_SUB_CC(CC_pap) /*really cc_enter*/)
CC_pap = CCC;
#endif
@@ -572,7 +572,7 @@ STGFUN(UpdatePAP)
* Take the CC out of the update frame if a CAF/DICT.
*/
- CCC = (IS_CAF_OR_DICT_CC(CC_pap)) ? GRAB_COST_CENTRE(SuB) : CC_pap;
+ CCC = (IS_CAF_OR_DICT_OR_SUB_CC(CC_pap)) ? GRAB_COST_CENTRE(SuB) : CC_pap;
#endif /* PROFILING */
@@ -658,7 +658,7 @@ STGFUN(PAP_entry)
*/
CC_pap = (CostCentre) CC_HDR(Node);
- CCC = (IS_CAF_OR_DICT_CC(CC_pap)) ? GRAB_COST_CENTRE(SuB) : CC_pap;
+ CCC = (IS_CAF_OR_DICT_OR_SUB_CC(CC_pap)) ? GRAB_COST_CENTRE(SuB) : CC_pap;
#endif /* PROFILING */
diff --git a/ghc/runtime/main/Threads.lc b/ghc/runtime/main/Threads.lc
index eba881de14..d8b9801c8a 100644
--- a/ghc/runtime/main/Threads.lc
+++ b/ghc/runtime/main/Threads.lc
@@ -82,7 +82,7 @@ P_ CurrentTSO = NULL;
/* Only needed for GranSim Light; costs of operations during rescheduling
are associated to the virtual processor on which ActiveTSO is living */
P_ ActiveTSO = NULL;
-rtsBool __resched = rtsFalse; /* debugging only !!*/
+rtsBool resched = rtsFalse; /* debugging only !!*/
/* Pointers to the head and tail of the runnable queues for each PE */
/* In GranSim Light only the thread/spark-queues of proc 0 are used */
@@ -236,10 +236,6 @@ P_ topClosure;
}
CurrentProc = MainProc;
-#if 0
- Idlers = RTSflags.GranFlags.proc;
- IdleProcs = ~0l;
-#endif
#endif /* GRAN */
if (DO_QP_PROF)
@@ -413,9 +409,9 @@ P_ topClosure;
TSO_CLOCK(ActiveTSO) = CurrentTime[CurrentProc];
ActiveTSO = NULL;
CurrentTime[CurrentProc] = TSO_CLOCK(CurrentTSO);
- if(RTSflags.GranFlags.DoFairSchedule && __resched )
+ if(RTSflags.GranFlags.DoFairSchedule && resched )
{
- __resched = rtsFalse;
+ resched = rtsFalse;
if (RTSflags.GranFlags.granSimStats &&
RTSflags.GranFlags.debug & 0x20000)
DumpGranEvent(GR_SCHEDULE,ThreadQueueHd);
@@ -542,14 +538,14 @@ int what_next; /* Run the current thread again? */
/* A bit of a hassle if the event queue is empty, but ... */
CurrentTSO = ThreadQueueHd;
- __resched = rtsFalse;
+ resched = rtsFalse;
if (RTSflags.GranFlags.Light &&
TSO_LINK(ThreadQueueHd)!=Prelude_Z91Z93_closure &&
TSO_CLOCK(ThreadQueueHd)>TSO_CLOCK(TSO_LINK(ThreadQueueHd))) {
if(RTSflags.GranFlags.granSimStats &&
RTSflags.GranFlags.debug & 0x20000 )
DumpGranEvent(GR_DESCHEDULE,ThreadQueueHd);
- __resched = rtsTrue;
+ resched = rtsTrue;
ThreadQueueHd = TSO_LINK(CurrentTSO);
if (ThreadQueueHd==Prelude_Z91Z93_closure)
ThreadQueueTl=Prelude_Z91Z93_closure;
@@ -756,6 +752,23 @@ int what_next; /* Run the current thread again? */
continue; /* handle next event in event queue */
case FINDWORK:
+ { /* Make sure that we have enough heap for creating a new
+ thread. This is a conservative estimate of the required heap.
+ This eliminates special checks for GC around NewThread within
+ munch_spark. */
+
+ I_ req_heap = TSO_HS + TSO_CTS_SIZE + STKO_HS +
+ RTSflags.ConcFlags.stkChunkSize;
+
+ if (SAVE_Hp + req_heap >= SAVE_HpLim ) {
+ ReallyPerformThreadGC(req_heap, rtsFalse);
+ SAVE_Hp -= req_heap;
+ if (IS_SPARKING(CurrentProc))
+ MAKE_IDLE(CurrentProc);
+ continue;
+ }
+ }
+
if( RTSflags.GranFlags.DoAlwaysCreateThreads ||
(ThreadQueueHd == Prelude_Z91Z93_closure &&
(RTSflags.GranFlags.FetchStrategy >= 2 ||
@@ -774,10 +787,10 @@ int what_next; /* Run the current thread again? */
/* DaH chu' Qu' yIchen! Now create new work! */
munch_spark (found, prev, spark);
- /* ToDo: check */
+ /* ToDo: check ; not valid if GC occurs in munch_spark
ASSERT(procStatus[CurrentProc]==Starting ||
procStatus[CurrentProc]==Idle ||
- RTSflags.GranFlags.DoAlwaysCreateThreads);
+ RTSflags.GranFlags.DoAlwaysCreateThreads); */
}
continue; /* to the next event */
@@ -913,10 +926,10 @@ do_the_fetchnode(eventq event)
}
# endif
event = grab_event();
- SAVE_Hp -= PACK_HEAP_REQUIRED-1;
+ SAVE_Hp -= PACK_HEAP_REQUIRED;
- /* GC knows that events are special beats and follows the pointer i.e. */
- /* events are valid even if they moved. Hopefully, an EXIT is triggered */
+ /* GC knows that events are special and follows the pointer i.e. */
+ /* events are valid even if they moved. An EXIT is triggered */
/* if there is not enough heap after GC. */
}
} while (rc == 4);
@@ -1246,7 +1259,8 @@ munch_spark (rtsBool found, sparkq prev, sparkq spark)
# endif
new_event(CurrentProc,CurrentProc,CurrentTime[CurrentProc]+1,
FINDWORK,Prelude_Z91Z93_closure,Prelude_Z91Z93_closure,NULL);
- ReallyPerformThreadGC(TSO_HS+TSO_CTS_SIZE,rtsTrue);
+ ReallyPerformThreadGC(TSO_HS+TSO_CTS_SIZE,rtsFalse);
+ SAVE_Hp -= TSO_HS+TSO_CTS_SIZE;
spark = NULL;
return; /* was: continue; */ /* to the next event, eventually */
}
@@ -1258,7 +1272,7 @@ munch_spark (rtsBool found, sparkq prev, sparkq spark)
TSO_EXPORTED(tso) = SPARK_EXPORTED(spark);
TSO_LOCKED(tso) = !SPARK_GLOBAL(spark);
- TSO_SPARKNAME(tso) = (0x1 >> 16) | (NEW_SPARKNAME_MASK & SPARK_NAME(spark)) ;
+ TSO_SPARKNAME(tso) = SPARK_NAME(spark);
new_event(CurrentProc,CurrentProc,CurrentTime[CurrentProc],
STARTTHREAD,tso,node,NULL);
@@ -1266,7 +1280,6 @@ munch_spark (rtsBool found, sparkq prev, sparkq spark)
procStatus[CurrentProc] = Starting;
ASSERT(spark != NULL);
- /* ASSERT(SPARK_PREV(spark)==prev); */
spark = delete_from_spark_queue (prev, spark);
}
@@ -1519,7 +1532,7 @@ enum gran_event_types event_type;
tot_tq_len += thread_queue_len(CurrentProc);
# endif
- ASSERT(TSO_LINK(CurrentTSO)==Prelude_Z91Z93_closure); /* TMP-CHG HWL */
+ ASSERT(TSO_LINK(CurrentTSO)==Prelude_Z91Z93_closure);
/* Idle proc; same for pri spark and basic version */
if(ThreadQueueHd==Prelude_Z91Z93_closure)
@@ -1556,7 +1569,7 @@ enum gran_event_types event_type;
if(RTSflags.GranFlags.Light)
{
ASSERT(ThreadQueueHd!=Prelude_Z91Z93_closure);
- ASSERT(TSO_LINK(tso)==Prelude_Z91Z93_closure); /* TMP-CHG HWL */
+ ASSERT(TSO_LINK(tso)==Prelude_Z91Z93_closure);
/* If only one thread in queue so far we emit DESCHEDULE in debug mode */
if(RTSflags.GranFlags.granSimStats &&
@@ -1564,7 +1577,7 @@ enum gran_event_types event_type;
TSO_LINK(ThreadQueueHd)==Prelude_Z91Z93_closure) {
DumpRawGranEvent(CurrentProc,CurrentProc,GR_DESCHEDULE,
ThreadQueueHd,Prelude_Z91Z93_closure,0);
- __resched = rtsTrue;
+ resched = rtsTrue;
}
if ( InsertThread(tso) ) { /* new head of queue */
@@ -2101,7 +2114,8 @@ PROC proc;
# endif
}
-TIME SparkStealTime()
+TIME
+SparkStealTime(void)
{
double fishdelay, sparkdelay, latencydelay;
fishdelay = (double)RTSflags.GranFlags.proc/2;
@@ -2109,10 +2123,6 @@ TIME SparkStealTime()
((fishdelay-1)/(double)(RTSflags.GranFlags.proc-1))*(double)idlers();
latencydelay = sparkdelay*((double)RTSflags.GranFlags.gran_latency);
-/*
- fprintf(stderr,"fish delay = %g, spark delay = %g, latency delay = %g, Idlers = %u\n",
- fishdelay,sparkdelay,latencydelay,Idlers);
-*/
return((TIME)latencydelay);
}
#endif /* GRAN ; HWL */
@@ -2207,16 +2217,11 @@ I_ name, gran_info, size_info, par_info, local;
/* To make casm more convenient use this function to label strategies */
int
set_sparkname(P_ tso, int name) {
- if (TSO_SPARKNAME(tso) & OLD_SPARKNAME_MASK == 1) {
- TSO_SPARKNAME(tso) &= NEW_SPARKNAME_MASK;
- TSO_SPARKNAME(tso) = TSO_SPARKNAME(tso) >> 16;
- TSO_SPARKNAME(tso) |= name;
- } else {
- TSO_SPARKNAME(tso) = (TSO_SPARKNAME(tso) & OLD_SPARKNAME_MASK) | name ;
- }
+ TSO_SPARKNAME(tso) = name ;
+
if(0 && RTSflags.GranFlags.granSimStats)
DumpRawGranEvent(CurrentProc,99,GR_START,
- tso,Nil_closure,
+ tso,Prelude_Z91Z93_closure,
TSO_SPARKNAME(tso));
/* ^^^ SN (spark name) as optional info */
/* spark_queue_len(CurrentProc,ADVISORY_POOL)); */
@@ -2226,7 +2231,7 @@ set_sparkname(P_ tso, int name) {
int
reset_sparkname(P_ tso) {
- TSO_SPARKNAME(tso) = (TSO_SPARKNAME(tso) & OLD_SPARKNAME_MASK) << 16;
+ TSO_SPARKNAME(tso) = 0;
return (0);
}
@@ -2420,11 +2425,11 @@ W_ type;
#ifdef PAR
TSO_CCC(tso) = (CostCentre)STATIC_CC_REF(CC_MAIN);
#endif
- TSO_NAME(tso) = (P_) INFO_PTR(topClosure); /* A string would be nicer -- JSM */
+ TSO_NAME(tso) = (P_) INFO_PTR(topClosure); /* A string would be nicer -- JSM */
TSO_ID(tso) = threadId++;
TSO_TYPE(tso) = type;
TSO_PC1(tso) = TSO_PC2(tso) = EnterNodeCode;
- TSO_ARG1(tso) = /* TSO_ARG2(tso) = */ 0; /* FIX THIS -- HWL */
+ TSO_ARG1(tso) = /* TSO_ARG2(tso) = */ 0;
TSO_SWITCH(tso) = NULL;
#ifdef TICKY_TICKY
@@ -2583,7 +2588,7 @@ EndThread(STG_NO_ARGS)
if (RTSflags.GranFlags.labelling && RTSflags.GranFlags.granSimStats &&
!RTSflags.GranFlags.granSimStats_suppressed)
DumpStartEventAt(TSO_STARTEDAT(CurrentTSO),where_is(CurrentTSO),0,GR_START,
- CurrentTSO,Nil_closure,
+ CurrentTSO,Prelude_Z91Z93_closure,
TSO_SPARKNAME(CurrentTSO));
/* ^^^ SN (spark name) as optional info */
/* spark_queue_len(CurrentProc,ADVISORY_POOL)); */
diff --git a/ghc/runtime/main/Ticky.lc b/ghc/runtime/main/Ticky.lc
index d0276dc490..ee411cd3c8 100644
--- a/ghc/runtime/main/Ticky.lc
+++ b/ghc/runtime/main/Ticky.lc
@@ -246,12 +246,12 @@ PrintTickyInfo()
I_ tot_returns_in_regs =
RET_NEW_IN_REGS_ctr + RET_OLD_IN_REGS_ctr + RET_SEMI_IN_REGS_ctr;
I_ tot_returns_in_heap =
- RET_NEW_IN_HEAP_ctr + RET_OLD_IN_HEAP_ctr + RET_SEMI_IN_HEAP_ctr + RET_SEMI_BY_DEFAULT_ctr/*???*/;
+ RET_NEW_IN_HEAP_ctr + RET_OLD_IN_HEAP_ctr + RET_SEMI_IN_HEAP_ctr + RET_SEMI_BY_DEFAULT_ctr/*?*/;
I_ tot_returns_of_new =
RET_NEW_IN_REGS_ctr + RET_NEW_IN_HEAP_ctr;
- I_ tot_returns_of_old = /* NB: NOT USED ???! 94/05 WDP */
+ I_ tot_returns_of_old = /* NB: NOT USED ?! 94/05 WDP */
RET_OLD_IN_REGS_ctr + RET_OLD_IN_HEAP_ctr +
- RET_SEMI_BY_DEFAULT_ctr + RET_SEMI_IN_HEAP_ctr + RET_SEMI_IN_REGS_ctr /*???*/;
+ RET_SEMI_BY_DEFAULT_ctr + RET_SEMI_IN_HEAP_ctr + RET_SEMI_IN_REGS_ctr /*?*/;
I_ tot_returns =
tot_returns_in_regs + tot_returns_in_heap;
diff --git a/ghc/runtime/storage/SM1s.lc b/ghc/runtime/storage/SM1s.lc
index e58bc83403..57822b5d96 100644
--- a/ghc/runtime/storage/SM1s.lc
+++ b/ghc/runtime/storage/SM1s.lc
@@ -133,7 +133,7 @@ collectHeap(reqsize, sm, do_full_collection)
#if defined(GRAN)
LinkEvents();
#endif
-#if defined(CONCURRENT) /* && !defined(GRAN) */
+#if defined(CONCURRENT)
LinkSparks();
#endif
#ifdef PAR
@@ -148,10 +148,8 @@ collectHeap(reqsize, sm, do_full_collection)
LINK_LOCATION_TO_CLOSURE(&sm->StablePointerTable);
*/
-# if /* !defined(GRAN) */ /* HWL */
LinkAStack( MAIN_SpA, stackInfo.botA );
LinkBStack( MAIN_SuB, stackInfo.botB );
-# endif
#endif /* parallel */
/* Do Inplace Compaction */
diff --git a/ghc/runtime/storage/SM2s.lc b/ghc/runtime/storage/SM2s.lc
index 7ad2e971a7..953d8f393b 100644
--- a/ghc/runtime/storage/SM2s.lc
+++ b/ghc/runtime/storage/SM2s.lc
@@ -144,10 +144,10 @@ collectHeap(reqsize, sm, do_full_collection)
#if defined(GRAN)
EvacuateEvents();
#endif
-#if defined(CONCURRENT) /* && !defined(GRAN) */
+#if defined(CONCURRENT)
EvacuateSparks();
#endif
-#if !defined(PAR) && !defined(GRAN)
+#if !defined(PAR) /* && !defined(GRAN) */
EvacuateAStack( MAIN_SpA, stackInfo.botA );
EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
#endif /* !PAR */
diff --git a/ghc/runtime/storage/SMap.lc b/ghc/runtime/storage/SMap.lc
index 392caab959..a4ce38219f 100644
--- a/ghc/runtime/storage/SMap.lc
+++ b/ghc/runtime/storage/SMap.lc
@@ -245,10 +245,10 @@ collect2s(W_ reqsize, smInfo *sm)
#if defined(GRAN)
EvacuateEvents();
#endif
-#if defined(CONCURRENT) /* && !defined(GRAN) */
+#if defined(CONCURRENT)
EvacuateSparks();
#endif
-#if !defined(PAR) /* && !defined(GRAN) */ /* HWL */
+#if !defined(PAR)
EvacuateAStack( MAIN_SpA, stackInfo.botA );
EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
#endif /* !PAR */
@@ -289,7 +289,7 @@ collect2s(W_ reqsize, smInfo *sm)
if (RTSflags.GcFlags.giveStats) {
char comment_str[BIG_STRING_LEN];
-#if !defined(PAR) /* && !defined(CONCURRENT) */ /* HWL */
+#if !defined(PAR)
sprintf(comment_str, "%4lu %4ld %3ld %3ld %6lu %6lu %6lu 2s",
(W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
bstk_roots, sm->rootno,
@@ -441,6 +441,31 @@ collectHeap(reqsize, sm, do_full_collection)
mutable++;
}
+#if 0 && defined(GRAN)
+ {
+ extern ex_RBH_q;
+ closq prev_ptr, clos_ptr;
+
+ DEBUG_STRING("Evacuate reverted RBHs:");
+ clos_ptr = ex_RBH_q;
+ while ( clos_ptr ) {
+
+ /* Scavenge the OldMutable */
+ P_ info = (P_) INFO_PTR(CLOS_CLOSURE(clos_ptr));
+ StgScavPtr scav_code = SCAV_CODE(info);
+ Scav = CLOS_CLOSURE(clos_ptr);
+ (scav_code)();
+
+	  /* No mutable closures are put on the ex_RBH_q */
+ /* ASSERT(IS_MUTABLE(info)); */
+ prev_ptr = clos_ptr;
+ clos_ptr = CLOS_NEXT(clos_ptr);
+ free(prev_ptr);
+ }
+ ex_RBH_q = NULL;
+ }
+#endif /* GRAN */
+
#ifdef PAR
EvacuateLocalGAs(rtsFalse);
#else
@@ -468,10 +493,10 @@ collectHeap(reqsize, sm, do_full_collection)
#if defined(GRAN)
EvacuateEvents();
#endif
-#if defined(CONCURRENT) /* && !defined(GRAN) */
+#if defined(CONCURRENT)
EvacuateSparks();
#endif
-#if !defined(PAR) /* && !defined(GRAN) */ /* HWL */
+#if !defined(PAR)
EvacuateAStack( MAIN_SpA, stackInfo.botA );
EvacuateBStack( MAIN_SuB, stackInfo.botB, &bstk_roots );
/* ToDo: Optimisation which squeezes out garbage update frames */
@@ -506,7 +531,7 @@ collectHeap(reqsize, sm, do_full_collection)
if (RTSflags.GcFlags.giveStats) {
char minor_str[BIG_STRING_LEN];
-#if !defined(PAR) /* && !defined(CONCURRENT) */ /* HWL */
+#if !defined(PAR)
sprintf(minor_str, "%4lu %4ld %3ld %3ld %4ld Minor",
(W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
bstk_roots, sm->rootno, caf_roots, mutable); /* oldnew_roots, old_words */
@@ -617,7 +642,7 @@ collectHeap(reqsize, sm, do_full_collection)
#if defined(GRAN)
LinkEvents();
#endif
-#if defined(CONCURRENT) /* && !defined(GRAN) */
+#if defined(CONCURRENT)
LinkSparks();
#endif
#ifdef PAR
@@ -627,11 +652,9 @@ collectHeap(reqsize, sm, do_full_collection)
DEBUG_STRING("Linking Stable Pointer Table:");
LINK_LOCATION_TO_CLOSURE(&sm->StablePointerTable);
*/
-#if 1 /* !defined(GRAN) */ /* HWL */
LinkAStack( MAIN_SpA, stackInfo.botA );
LinkBStack( MAIN_SuB, stackInfo.botB );
#endif
-#endif
/* Do Inplace Compaction */
/* Returns start of next closure, -1 gives last allocated word */
@@ -679,7 +702,7 @@ collectHeap(reqsize, sm, do_full_collection)
if (RTSflags.GcFlags.giveStats) {
char major_str[BIG_STRING_LEN];
-#if !defined(PAR) /* && !defined(CONCURRENT) */ /* HWL */
+#if !defined(PAR)
sprintf(major_str, "%4lu %4ld %3ld %3ld %4d %4d *Major* %4.1f%%",
(W_) (SUBTRACT_A_STK(MAIN_SpA, stackInfo.botA) + 1),
bstk_roots, sm->rootno, appelInfo.OldCAFno,
@@ -719,6 +742,8 @@ collectHeap(reqsize, sm, do_full_collection)
return( GC_SOFT_LIMIT_EXCEEDED ); /* Heap nearly exhausted */
} else {
return( GC_SUCCESS ); /* Heap OK */
+ /* linked = IS_MUTABLE(INFO_PTR(closure)) && MUT_LINK(closure) !=
+ MUT_NOT_LINKED; */
}
}
diff --git a/ghc/runtime/storage/SMcopying.lc b/ghc/runtime/storage/SMcopying.lc
index 77fbd8b135..77c4124355 100644
--- a/ghc/runtime/storage/SMcopying.lc
+++ b/ghc/runtime/storage/SMcopying.lc
@@ -207,7 +207,7 @@ Note: no \tr{evacuate[AB]Stack} for ``parallel'' systems, because they
don't have a single main stack.
\begin{code}
-#if !defined(PAR) /* && !defined(GRAN) */ /* HWL */
+#if !defined(PAR)
void
EvacuateAStack(PP_ stackA, PP_ botA /* botA points to bottom-most word */)
{
@@ -235,7 +235,7 @@ EVACUATED_INFOPTR)
Otherwise closure is live update reference to to-space address
\begin{code}
-#if !defined(PAR) /* && !defined(GRAN) */ /* HWL */
+#if !defined(PAR)
void
EvacuateBStack( stackB, botB, roots )
P_ stackB;
diff --git a/ghc/runtime/storage/SMmark.lhc b/ghc/runtime/storage/SMmark.lhc
index 72ea1d3f5c..b1a5aa218f 100644
--- a/ghc/runtime/storage/SMmark.lhc
+++ b/ghc/runtime/storage/SMmark.lhc
@@ -1738,7 +1738,6 @@ STGFUN(_PRMarking_MarkNextGA)
}
#else
-#if 1 /* !defined(CONCURRENT) */ /* HWL */
STGFUN(_PRMarking_MarkNextAStack)
{
FUNBEGIN;
@@ -1771,7 +1770,6 @@ STGFUN(_PRMarking_MarkNextBStack)
JUMP_MARK;
FUNEND;
}
-#endif /* !CONCURRENT */
#endif /* PAR */
\end{code}
diff --git a/ghc/runtime/storage/SMmarking.lc b/ghc/runtime/storage/SMmarking.lc
index d1eb76e4c8..72972227bd 100644
--- a/ghc/runtime/storage/SMmarking.lc
+++ b/ghc/runtime/storage/SMmarking.lc
@@ -47,12 +47,10 @@ EXTDATA(_PRMarking_MarkNextClosureInFetchBuffer_closure);
EXTFUN(_PRMarking_MarkNextGA);
EXTDATA(_PRMarking_MarkNextGA_closure);
#else
-# if 1 /* !defined(GRAN) */ /* HWL */
EXTFUN(_PRMarking_MarkNextAStack);
EXTFUN(_PRMarking_MarkNextBStack);
EXTDATA(_PRMarking_MarkNextAStack_closure);
EXTDATA(_PRMarking_MarkNextBStack_closure);
-# endif
#endif /* not parallel */
P_ sm_roots_end;
@@ -157,7 +155,6 @@ markHeapRoots(sm, cafs1, cafs2, base, lim, bit_array)
} while (!found && MRoot != NULL);
DEBUG_STRING("Marking Sparks (GRAN):");
- /* ToDo: Check this code */
for(proc = 0; proc < RTSflags.GranFlags.proc; ++proc) {
for(pool = 0; pool < SPARK_POOLS; pool++) {
MRoot = (P_) PendingSparksHd[proc][pool];
@@ -195,7 +192,6 @@ markHeapRoots(sm, cafs1, cafs2, base, lim, bit_array)
miniInterpret((StgFunPtr) _startMarkWorld);
}
#else
-# if 1 /* !defined(GRAN) */ /* HWL */
/* Note: no *external* stacks in parallel/concurrent world */
DEBUG_STRING("Marking A Stack:");
@@ -214,7 +210,6 @@ markHeapRoots(sm, cafs1, cafs2, base, lim, bit_array)
MStack = (P_) _PRMarking_MarkNextBStack_closure;
miniInterpret((StgFunPtr)_startMarkWorld);
}
-# endif /* ! CONCURRENT */
#endif /* PAR */
DEBUG_STRING("Marking & Updating CAFs:");
diff --git a/ghc/utils/Jmakefile b/ghc/utils/Jmakefile
index ab32c2b229..eac3396d58 100644
--- a/ghc/utils/Jmakefile
+++ b/ghc/utils/Jmakefile
@@ -2,13 +2,16 @@
SUBDIRS = hp2ps \
hscpp \
- hstags \
mkdependHS \
parallel \
stat2resid \
ugen \
unlit
+/* hstags
+ not ready to go for 2.01
+*/
+
/* "heap-view" is not in the list because (a) it requires
a Haskell compiler (which you may not have yet), and (b) you are
unlikely to want it desperately. It is easy to build once you have
diff --git a/ghc/utils/hp2ps/TraceElement.h b/ghc/utils/hp2ps/TraceElement.h
index 03b151cc41..d843392a23 100644
--- a/ghc/utils/hp2ps/TraceElement.h
+++ b/ghc/utils/hp2ps/TraceElement.h
@@ -1,6 +1,6 @@
#ifndef TRACE_ELEMENT_H
#define TRACE_ELEMENT_H
-TraceElement PROTO((void));
+void TraceElement PROTO((void));
#endif /* TRACE_ELEMENT_H */
diff --git a/ghc/utils/hstags/README b/ghc/utils/hstags/README
index 388a8e869b..b457ef125a 100644
--- a/ghc/utils/hstags/README
+++ b/ghc/utils/hstags/README
@@ -3,8 +3,8 @@ files for Glasgow-Haskell-compilable programs. (It is "sophisticated"
only in that it uses the GHC parser to find "interesting" things in
the source files.)
+With GHC 2.01: doesn't work yet.
+
A simpler alternative is Denis Howe's "fptags" script, which is
distributed in the ghc/CONTRIB directory.
-Will Partain
-Sept 1994
diff --git a/ghc/utils/mkdependHS/mkdependHS.prl b/ghc/utils/mkdependHS/mkdependHS.prl
index c216394416..46047e1633 100644
--- a/ghc/utils/mkdependHS/mkdependHS.prl
+++ b/ghc/utils/mkdependHS/mkdependHS.prl
@@ -83,15 +83,15 @@ if (! $ENV{'GLASGOW_HASKELL_ROOT'}) { # good -- death to environment variables
} else {
$TopPwd = $ENV{'GLASGOW_HASKELL_ROOT'};
- if ( '$(INSTLIBDIR_GHC)' =~ /\/local\/fp(\/.*)/ ) {
+    if ('$(INSTLIBDIR_GHC)' =~ /.*(\/lib\/ghc\/\d\.\d\d\/[^-]+-[^-]+-[^-]+\/.*)/) {
$InstLibDirGhc = $ENV{'GLASGOW_HASKELL_ROOT'} . $1;
} else {
print STDERR "GLASGOW_HASKELL_ROOT environment variable is set;\nBut can't untangle $(INSTLIBDIR_GHC).\n(Installation error)\n";
exit(1);
}
- if ( '$(INSTDATADIR_GHC)' =~ /\/local\/fp(\/.*)/ ) {
- $InstDataDirGhc = $ENV{'GLASGOW_HASKELL_ROOT'} . $1;
+ if ('$(INSTDATADIR_GHC)' =~ /.*(\/lib\/ghc\/\d\.\d\d\/.*)/) {
+	$InstDataDirGhc = $ENV{'GLASGOW_HASKELL_ROOT'} . $1;
} else {
print STDERR "GLASGOW_HASKELL_ROOT environment variable is set;\nBut can't untangle $(INSTDATADIR_GHC).\n(Installation error)\n";
exit(1);
@@ -104,72 +104,27 @@ $Unlit = ( $(INSTALLING) ) ? "$InstLibDirGhc/unlit"
$Begin_magic_str = "# DO NOT DELETE: Beginning of Haskell dependencies\n";
$End_magic_str = "# DO NOT DELETE: End of Haskell dependencies\n";
$Obj_suffix = '.o';
-$ghc_version_info = $(PROJECTVERSION) * 100;
+$ghc_version_info = int ( $(PROJECTVERSION) * 100 );
$Import_dirs = '.';
%Syslibs = ();
+%LibIfaces = (); # known prelude/syslib ifaces; read from a file
%IgnoreMe = ();
-%PreludeIfaces = ( 'Prelude', '1',
- , 'Array', '1'
- , 'Char', '1'
- , 'Complex', '1'
- , 'Directory', '1'
- , 'IO', '1'
- , 'Ix', '1'
- , 'List', '1'
- , 'Maybe', '1'
- , 'Monad', '1'
- , 'Ratio', '1'
- , 'System', '1'
- , 'PreludeGlaST', '1'
- , 'PreludeGlaMisc','1'
- , 'Concurrent', '1'
- , 'Parallel', '1');
-%GhcLibIfaces = ( 'Bag', '1',
- 'BitSet', '1',
- # CharSeq not supposed to be used by user (I think. WDP)
- 'FiniteMap', '1',
- 'ListSetOps', '1',
- 'Maybes', '1',
- 'PackedString', '1',
- 'Regex', '1',
- 'MatchPS', '1',
- 'Readline', '1',
- 'Socket', '1',
- 'SocketPrim', '1',
- 'BSD', '1',
- 'Pretty', '1',
- 'Set', '1',
- 'Util', '1' );
-%HbcLibIfaces = ( 'Algebra', '1',
- 'Hash', '1',
- 'ListUtil', '1',
- 'Miranda', '1',
- 'NameSupply', '1',
- 'Native', '1',
- 'Number', '1',
- 'Parse', '1',
- 'Pretty', '1',
- 'Printf', '1',
- 'QSort', '1',
- 'Random', '1',
- 'SimpleLex', '1',
- 'Time', '1',
- 'Trace', '1',
- 'Word', '1' );
-%IO13Ifaces = ( 'LibSystem', '1',
- 'LibCPUTime', '1',
- 'LibDirectory', '1',
- 'LibPosix', '1',
- 'LibTime', '1' );
-
-$Haskell_1 = 2; # assume Haskell 1.2, still. Changed by -fhaskell-1.3
+
+$Haskell_1 = 3; # assume Haskell 1.3. Changed by -fhaskell-1.?
$Include_dirs = '-I.';
$Makefile = '';
@Src_files = ();
&mangle_command_line_args();
+# load up LibIfaces tables:
+&read_MODULES('prelude', 'prelude');
+foreach $lib ( @Syslibs ) {
+ &read_MODULES('syslib', $lib);
+}
+#print STDERR "libs provide:",(keys %LibIfaces),"\n";
+
if ( $Status ) {
print stderr $Usage;
exit(1);
@@ -206,7 +161,7 @@ foreach $sf (@Src_files) {
# builds up @Depend_lines
print STDERR "Here we go for source file: $sf\n" if $Verbose;
($bf = $sf) =~ s/\.l?hs$//;
- push(@Depend_lines, "$bf$Obj_suffix : $sf\n");
+ push(@Depend_lines, "$bf$Obj_suffix $bf.hi : $sf\n");
foreach $suff (@File_suffix) {
push(@Depend_lines, "$bf$suff$Obj_suffix : $sf\n");
}
@@ -297,6 +252,31 @@ sub mangle_command_line_args {
@File_suffix = sort (@File_suffix);
}
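+# read_MODULES: record in %LibIfaces the interfaces provided by the prelude
+# or by a syslib, as listed in that library's MODULES file (one module name
+# per line, '#' starting a comment) -- e.g. the prelude's file lists
+# Prelude, Array, IO and friends.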
+sub read_MODULES {
+ local($flavor,$lib) = @_;
+
+ local($m_dir) = '';
+ if ($flavor eq 'prelude') {
+ $m_dir = ( $(INSTALLING) ) ? "$InstDataDirGhc/imports" : "$TopPwd/$(CURRENT_DIR)/$(GHC_LIBSRC)";
+ } else {
+ $m_dir = ( $(INSTALLING) ) ? "$InstSysLibDir/$lib" : "$TopPwd/hslibs/$lib";
+ }
+ local($m_file) = "$m_dir/MODULES";
+
+ open(MFILE, "< $m_file") || die "$Pgm: can't open $m_file to read\n";
+ while (<MFILE>) {
+ chop;
+ # strip comments and leading/trailing whitespace
+ s/#.*//;
+ s/^\s+//;
+ s/\s+$//;
+ next if /^$/; # nothing left!
+
+ $LibIfaces{"$lib:$_"} = 1; # record that this library provides this iface
+ }
+ close(MFILE);
+}
+
sub grab_arg_arg {
local($option, $rest_of_arg) = @_;
@@ -351,9 +331,9 @@ sub slurp_file_for_imports {
|| die "$Pgm: Can't open $file_to_read: $!\n";
while (<SRCFILE>) {
- next unless (/^>?\s*(import)\s+([A-Z][A-Za-z0-9_']*)/ || /^!(include)\s+"(\S+)"/);
+ next unless (/^>?\s*(import)(\s+qualified)?\s+([A-Z][A-Za-z0-9_']*)/ || /^!(include)(\s+)"(\S+)"/);
$todo = $1;
- $modname = $2;
+ $modname = $3;
if ($todo eq 'import') {
if ( $IgnoreMe{$modname} eq 'y' ) {
@@ -462,24 +442,14 @@ sub find_in_Import_dirs {
print STDERR "trying... $name_to_check\n" if $Verbose >= 2; # very verbose
return($name_to_check) if -f $name_to_check;
}
+
# OK, maybe it's referring to something in a system library
foreach $lib ( @Syslibs ) {
- if ( $lib eq 'ghc' ) {
- return('__ignore__') if $GhcLibIfaces{$modname};
- } elsif ( $lib eq 'hbc' ) {
- return('__ignore__') if $HbcLibIfaces{$modname};
- } else {
- die "Unrecognised syslib: $lib\n";
- }
- }
-
- # Might be a Haskell 1.3 Module (but only if we've said -fhaskell-1.3)
- if ( $Haskell_1 >= 3 ) {
- return('__ignore__') if $IO13Ifaces{$modname};
+ return('__ignore__') if $LibIfaces{"$lib:$modname"};
}
# Last hope: referring to a Prelude interface
- return('__ignore__') if $PreludeIfaces{$modname};
+ return('__ignore__') if $LibIfaces{"prelude:$modname"};
die "No file `$modname.hi', `$modname.lhs' or `$modname.hs' (reqd from file `$orig_src_file')\namong import directories:\n\t$Import_dirs\n";
}
diff --git a/ghc/utils/parallel/AVG.pl b/ghc/utils/parallel/AVG.pl
new file mode 100644
index 0000000000..9ec42aee2f
--- /dev/null
+++ b/ghc/utils/parallel/AVG.pl
@@ -0,0 +1,108 @@
+#!/usr/local/bin/perl
+# (C) Hans Wolfgang Loidl, October 1995
+#############################################################################
+# Time-stamp: <Thu Oct 26 1995 18:30:54 Stardate: [-31]6498.64 hwloidl>
+#
+# Usage: AVG [options] <gr-file>
+#
+# A quick hack to get avg runtimes of different spark sites. Similar to SPLIT.
+#
+# Options:
+# -s <list> ... a perl list of spark names; the given <gr-file> is scanned
+# for each given name in turn and granularity graphs are
+# generated for each of these sparks
+# -O ... use gr2RTS and RTS2gran instead of gran-extr;
+# this generates fewer output files (only granularity graphs)
+# but should be faster and far less memory consuming
+# -h ... help; print this text.
+# -v ... verbose mode.
+#
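+# Example (prog.gr stands for any GrAnSim profile): report average runtimes
+# for spark sites 3 and 4 only:
+#     AVG -s '(3,4)' prog.gr
+#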
+#############################################################################
+
+require "getopts.pl";
+
+&Getopts('hvOs:');
+
+do process_options();
+
+if ( $opt_v ) { do print_verbose_message(); }
+
+# ---------------------------------------------------------------------------
+# Init
+# ---------------------------------------------------------------------------
+
+foreach $s (@sparks) {
+ # extract END events for this spark-site
+  open (GET,"cat $gr_file | tf -s $s | avg-RTS |") || die "$!\n";
+  while (<GET>) { print; }    # pass the averages straight through to stdout
+  close (GET);
+}
+
+exit 0;
+
+# -----------------------------------------------------------------------------
+
+sub process_options {
+
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0): $!\n";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+ exit ;
+ }
+
+ if ( $opt_s ) {
+ $opt_s =~ s/[\(\)\[\]]//g;
+ @sparks = split(/[,;. ]+/, $opt_s);
+ } else {
+ @sparks = ( 3, 4, 5, 6, 7, 8, 11, 12, 13, 14, 15);
+ }
+
+ if ( $#ARGV != 0 ) {
+    print "Usage: $0 [options] <gr-file>\n";
+ print "Use -h option to get details\n";
+ exit 1;
+ }
+
+ $gr_file = $ARGV[0];
+ ($basename = $gr_file) =~ s/\.gr//;
+ $rts_file = $basename . ".rts"; # "RTS";
+ $gran_file = "g.ps"; # $basename . ".ps";
+ #$rts_file = $gr_file;
+ #$rts_file =~ s/\.gr/.rts/g;
+
+ if ( $opt_o ) {
+ $va_file = $opt_o;
+ $va_dvi_file = $va_file;
+ $va_dvi_file =~ s/\.tex/.dvi/g;
+ $va_ps_file = $va_file;
+ $va_ps_file =~ s/\.tex/.ps/g;
+ } else {
+ $va_file = "va.tex";
+ $va_dvi_file = "va.dvi";
+ $va_ps_file = "va.ps";
+ }
+
+ if ( $opt_t ) {
+ $template_file = $opt_t;
+ } else {
+ $template_file = "TEMPL";
+ }
+
+ $tmp_file = ",t";
+}
+
+# -----------------------------------------------------------------------------
+
+sub print_verbose_message {
+ print "Sparks: (" . join(',',@sparks) . ")\n";
+ print "Files: .gr " . $gr_file . " template " . $template_file .
+ " va " . $va_file . "\n";
+}
+
+# -----------------------------------------------------------------------------
diff --git a/ghc/utils/parallel/GrAnSim.el b/ghc/utils/parallel/GrAnSim.el
new file mode 100644
index 0000000000..49330a9749
--- /dev/null
+++ b/ghc/utils/parallel/GrAnSim.el
@@ -0,0 +1,432 @@
+;; ---------------------------------------------------------------------------
+;; Time-stamp: <Tue Jun 11 1996 18:01:28 Stardate: [-31]7643.54 hwloidl>
+;;
+;; Mode for GrAnSim profiles
+;; ---------------------------------------------------------------------------
+
+(defvar gransim-auto-hilit t
+ "Automagically invoke hilit19.")
+
+(defvar grandir (getenv "GRANDIR")
+ "Root of the GrAnSim installation. Executables should be in grandir/bin")
+
+(defvar hwl-hi-node-face 'highlight
+ "Face to be used for specific highlighting of a node")
+
+(defvar hwl-hi-thread-face 'holiday-face
+ "Face to be used for specific highlighting of a thread")
+
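+;; A typical setup, e.g. in ~/.emacs (the paths below are only an
+;; illustration and must match your own installation):
+;;
+;;   (setenv "GRANDIR" "/usr/local/grasp")     ; root of the GrAnSim installation
+;;   (load "/usr/local/grasp/bin/GrAnSim.el")
+;;
+;; Visiting a .gr file then turns on gr-mode automatically.
+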
+;; ---------------------------------------------------------------------------
+
+(setq exec-path (cons (concat grandir "/bin") exec-path))
+
+;; Requires hilit19 for highlighting parts of a GrAnSim profile
+(cond (window-system
+ (setq hilit-mode-enable-list '(not text-mode)
+ hilit-background-mode 'light
+ hilit-inhibit-hooks nil
+ hilit-inhibit-rebinding nil);
+
+ (require 'hilit19)
+))
+
+
+(setq auto-mode-alist
+ (append '(("\\.gr" . gr-mode))
+ auto-mode-alist))
+
+(defvar gr-mode-map (make-keymap "GrAnSim Profile Mode SetUp")
+ "Keymap for GrAnSim profiles.")
+
+; (fset 'GrAnSim-mode-fiddly gr-mode-map)
+
+;(define-key gr-mode-map [wrap]
+; '("Wrap lines" . hwl-wrap))
+
+;(define-key gr-mode-map [truncate]
+; '("Truncate lines" . hwl-truncate))
+
+;(define-key global-map [C-S-down-mouse-1] 'GrAnSim-mode-fiddly)
+
+;(modify-frame-parameters (selected-frame)
+; '((menu-bar-lines . 2)))
+
+;(define-key-after gr-mode-map [menu-bar GrAnSim]
+; '("GrAnSim" . (make-sparse-keymap "GrAnSim")) 'edit)
+
+;(defvar GrAnSim-menu-map (make-sparse-keymap "GrAnSim"))
+
+(define-key gr-mode-map [menu-bar GrAnSim]
+ (cons "GrAnSim" (make-sparse-keymap "GrAnSim"))) ; 'edit)
+
+(define-key gr-mode-map [menu-bar GrAnSim wrap]
+ '("Wrap lines" . hwl-wrap))
+
+(define-key gr-mode-map [menu-bar GrAnSim truncate]
+ '("Truncate lines" . hwl-truncate))
+
+(define-key gr-mode-map [menu-bar GrAnSim toggle-truncate]
+ '("Toggle truncate/wrap" . hwl-toggle-truncate-wrap) )
+
+(define-key gr-mode-map [menu-bar GrAnSim hi-clear]
+ '("Clear highlights" . hwl-hi-clear))
+
+(define-key gr-mode-map [menu-bar GrAnSim hi-thread]
+ '("Highlight specific Thread" . hwl-hi-thread))
+
+(define-key gr-mode-map [menu-bar GrAnSim hi-node]
+ '("Highlight specific Node" . hwl-hi-node))
+
+(define-key gr-mode-map [menu-bar GrAnSim highlight]
+ '("Highlight buffer" . hilit-rehighlight-buffer))
+
+(define-key gr-mode-map [menu-bar GrAnSim narrow-event]
+ '("Narrow to Event" . hwl-narrow-to-event))
+
+(define-key gr-mode-map [menu-bar GrAnSim narrow-thread]
+ '("Narrow to Thread" . hwl-narrow-to-thread))
+
+(define-key gr-mode-map [menu-bar GrAnSim narrow-pe]
+ '("Narrow to PE" . hwl-narrow-to-pe))
+
+
+
+; (define-key global-map [C-S-down-mouse-1] 'GrAnSim-mode-fiddly)
+
+
+(defvar gr-mode-hook nil
+ "Invoked in gr mode.")
+
+
+;;; Ensure new buffers won't get this mode if default-major-mode is nil.
+;(put 'gr-mode 'mode-class 'special)
+
+(defun gr-mode ()
+ "Major mode for GrAnSim profiles."
+ (interactive)
+ (kill-all-local-variables)
+ ;(use-local-map gr-mode-map)
+ (use-local-map gr-mode-map) ; This provides the local keymap.
+ (setq major-mode 'gr-mode)
+ (setq mode-name "GrAnSim Profile Mode")
+ (setq local-abbrev-table text-mode-abbrev-table)
+ (set-syntax-table text-mode-syntax-table)
+ (setq truncate-lines t) ; do not wrap lines (truncates END lines!)
+ (auto-save-mode -1)
+ ;(setq buffer-offer-save t)
+ (run-hooks 'gr-mode-hook))
+
+;; same as mh-make-local-vars
+(defun gr-make-local-vars (&rest pairs)
+ ;; Take VARIABLE-VALUE pairs and make local variables initialized to the
+ ;; value.
+ (while pairs
+ (make-variable-buffer-local (car pairs))
+ (set (car pairs) (car (cdr pairs)))
+ (setq pairs (cdr (cdr pairs)))))
+
+;; ----------------------------------------------------------------------
+;; Highlighting stuff (currently either hilit19 or fontlock is used)
+;; ----------------------------------------------------------------------
+
+(hilit-set-mode-patterns
+ 'gr-mode
+ '(;; comments
+ ("--.*$" nil comment)
+ ("\\+\\+.*$" nil comment)
+ ;; hilight important bits in the header
+ ("^Granularity Simulation for \\(.*\\)$" 1 glob-struct)
+ ("^PEs[ \t]+\\([0-9]+\\)" 1 decl)
+ ("^Latency[ \t]+\\([0-9]+\\)" 1 decl)
+ ("Arith[ \t]+\\([0-9]+\\)" 1 decl)
+ ("Branch[ \t]+\\([0-9]+\\)" 1 decl)
+ ("Load[ \t]+\\([0-9]+\\)" 1 decl)
+ ("Store[ \t]+\\([0-9]+\\)" 1 decl)
+ ("Float[ \t]+\\([0-9]+\\)" 1 decl)
+ ("Alloc[ \t]+\\([0-9]+\\)" 1 decl)
+ ;; hilight PE number and time in each line
+ ("^PE[ \t]+\\([0-9]+\\)" 1 glob-struct)
+ (" \\[\\([0-9]+\\)\\]:" 1 define)
+ ;; in this case the events are the keyword
+ ; ("\\(FETCH\\|REPLY\\|RESUME\\|RESUME(Q)\\|SCHEDULE\\|SCHEDULE(Q)\\|BLOCK\\|STEALING\\|STOLEN\\|STOLEN(Q)\\)[ \t]" 1 keyword)
+ ("\\(FETCH\\|BLOCK\\)[ \t]" 1 label)
+ ("\\(REPLY\\|RESUME(Q)\\|SCHEDULE(Q)\\|STOLEN(Q)\\)[ \t]" 1 named-param)
+ ("\\(RESUME\\|SCHEDULE\\|STOLEN\\)[ \t]" 1 msg-quote)
+ ("\\(STEALING\\)[ \t]" 1 keyword)
+ ("\\(START\\|END\\)[ \t]" 1 defun)
+ ("\\(SPARK\\|SPARKAT\\|USED\\|PRUNED\\)[ \t]" 1 crossref)
+ ("\\(EXPORTED\\|ACQUIRED\\)[ \t]" 1 string)
+   ;; especially interesting are END events; highlight runtime etc.
+ (",[ \t]+RT[ \t]+\\([0-9]+\\)" 1 define)
+ ;; currently unused but why not?
+ ("\"" ".*\"" string))
+)
+
+;; --------------------------------------------------------------------------
+;; Own fcts for selective highlighting
+;; --------------------------------------------------------------------------
+
+(defun hwl-hi-node (node)
+ "Highlight node in GrAnSim profile."
+ (interactive "sNode (hex): ")
+ (save-excursion
+ (let* ( (here (point))
+ (len (length node)) )
+ (goto-char (point-min))
+ (while (search-forward node nil t)
+ (let* ( (end (point))
+ (start (- end len)) )
+ (add-text-properties start end `(face ,hwl-hi-node-face))
+ )
+ ) )
+ )
+)
+
+(defun hwl-hi-thread (task)
+ "Highlight task in GrAnSim profile."
+ (interactive "sTask: ")
+ (save-excursion
+ (let* ( (here (point))
+ (len (length task))
+ (se-str (format "[A-Z)]\\s-+%s\\(\\s-\\|,\\)" task))
+ )
+ (goto-char (point-min))
+ (while (re-search-forward se-str nil t)
+ (let ( (c (current-column)) )
+ (if (and (> c 10) (< c 70))
+ (let* ( (end (1- (point)))
+ (start (- end len)) )
+ (add-text-properties start end `(face ,hwl-hi-thread-face))
+ ) ) )
+ ) )
+ )
+)
+
+(defun hwl-hi-line ()
+ "Highlight the current line."
+ (interactive)
+ (save-excursion
+ (beginning-of-line)
+ (let ( (beg (point)) )
+ (end-of-line)
+ (add-text-properties beg (point) '(face highlight))
+ )
+ )
+)
+
+(defun hwl-unhi-line ()
+ "Unhighlight the current line."
+ (interactive)
+ (save-excursion
+ (beginning-of-line)
+ (let ( (beg (point)) )
+ (end-of-line)
+ (add-text-properties beg (point) '(face nil))
+ )
+ )
+)
+
+; Doesn't work yet
+(defun hwl-hi-from-to (from to)
+ "Highlight region between two timestamps."
+ (interactive "nFrom: \nnTo:")
+ (save-excursion
+ (let* ( (here (point))
+ (now 0)
+ start end
+ (separator '"+++++")
+ )
+ (goto-char (point-min))
+ ; (re-search-forward REGEXP)
+ (search-forward separator nil t)
+ (forward-line)
+ (while (< now from)
+ (beginning-of-line)
+ (forward-line)
+ (forward-char 7)
+ (setq beg (point))
+ (search-forward "]")
+ (setq time-str (buffer-substring beg (- (point) 2)))
+ (setq now (string-to-number time-str))
+ )
+ (if (< now from)
+ nil
+ (setq start (point))
+ (while (< now to)
+ (beginning-of-line)
+ (forward-line)
+ (forward-char 7)
+ (setq beg (point))
+ (search-forward "]")
+ (setq time-str (buffer-substring beg (- (point) 2)))
+ (setq now (string-to-number time-str))
+ )
+ (if (< now to)
+ nil
+ (setq end (point))
+ (add-text-properties start end '(face paren-match-face))
+ )
+ )
+ ) ; let
+ ) ; excursion
+)
+
+(defun hwl-hi-clear ()
+ (interactive)
+ (let ( (start (point-min) )
+ (end (point-max)) )
+ (remove-text-properties start end '(face nil))
+ )
+)
+
+;; --------------------------------------------------------------------------
+;; Misc Elisp functions
+;; --------------------------------------------------------------------------
+
+(defun hwl-wrap ()
+ (interactive)
+ (setq truncate-lines nil)
+ (hilit-recenter nil)
+)
+
+(defun hwl-truncate ()
+ (interactive)
+ (setq truncate-lines t)
+ (hilit-recenter nil)
+)
+
+(defun hwl-toggle-truncate-wrap ()
+ (interactive)
+ (if truncate-lines (setq truncate-lines nil)
+ (setq truncate-lines t))
+ (hilit-recenter nil)
+)
+
+(defun hwl-narrow-to-pe (pe)
+ (interactive "nPE: ")
+ (hwl-narrow 1 pe "")
+)
+
+(defun hwl-narrow-to-thread (thread)
+ (interactive "sThread: ")
+ (hwl-narrow 2 thread "")
+)
+
+(defun hwl-narrow-to-event (event)
+ (interactive "sEvent: ")
+ (hwl-narrow 3 0 event)
+)
+
+(defun hwl-narrow (mode id str)
+ ( let* ((outbuffer (get-buffer-create "*GrAnSim Narrowed*"))
+ ;(from (beginning-of-buffer))
+ ;(to (end-of-buffer))
+ ;(to (point)) ; (region-end))
+ ;(text (buffer-substring from to)) ; contains text in region
+ (w (selected-window))
+ ;(nh 5) ; height of new window
+         ;(h (window-height w)) ; height of selected window
+ ;(h1 (if (<= h nh) (- h 1) (- h nh))) ; height of old window
+ (w1 (get-buffer-window outbuffer 'visible))
+
+ (infile (buffer-file-name)) ; or
+ (inbuffer (current-buffer))
+ (command "tf")
+ ;(mode_opt (cond ((eq mode 1) "-p")
+ ; ((eq mode 2) "-t")
+ ; ((eq mode 3) "-e")
+ ; (t "-v")))
+ )
+ (if w1 (message "Window *GrAnSim Narrowed* already visible")
+ (split-window w nil nil))
+ (switch-to-buffer-other-window outbuffer)
+ (erase-buffer)
+ (setq truncate-lines t)
+ (gr-mode)
+ ;(beginning-of-buffer)
+ ;(set-mark)
+ ;(end-of-buffer)
+ ;(delete-region region-beginning region-end)
+ (cond ((eq mode 1)
+ ;(message (format "Narrowing to Processor %d" id))
+ (call-process command nil outbuffer t "-p" (format "%d" id) infile ))
+ ((eq mode 2)
+ ;(message (format "Narrowing to Thread %d" id))
+ (call-process command nil outbuffer t "-t" (format "%s" id) infile ))
+ ((eq mode 3)
+ ;(message (format "Narrowing to Event %s" str))
+ (call-process command nil outbuffer t "-e" str infile ))
+ )
+ )
+)
+
+(defun hwl-command-on-buffer (prg opts file)
+  (interactive "CProgram:\nsOptions:\nfFile:")
+ ( let* ((outbuffer (get-buffer-create "*GrAnSim Command*"))
+ (from (beginning-of-buffer))
+ (to (end-of-buffer))
+ ;(to (point)) ; (region-end))
+ ;(text (buffer-substring from to)) ; contains text in region
+ (w (selected-window))
+ ;(nh 5) ; height of new window
+         ;(h (window-height w)) ; height of selected window
+ ;(h1 (if (<= h nh) (- h 1) (- h nh))) ; height of old window
+ (w1 (get-buffer-window outbuffer 'visible))
+
+ (infile (buffer-file-name)) ; or
+ (inbuffer (current-buffer))
+ ;(command "tf")
+ ;(mode_opt (cond ((eq mode 1) "-p")
+ ; ((eq mode 2) "-t")
+ ; ((eq mode 3) "-e")
+ ; (t "-v")))
+ )
+ (if w1 (message "Window *GrAnSim Command* already visible")
+ (split-window w nil nil))
+ (switch-to-buffer-other-window outbuffer)
+ (erase-buffer)
+ (setq truncate-lines t)
+ (gr-mode)
+    (call-process prg nil outbuffer t opts file)
+ )
+)
+
+;; ToDo: Elisp Fcts for calling scripts like gr3ps etc
+
+(define-key gr-mode-map "\C-ct" 'hwl-truncate)
+(define-key gr-mode-map "\C-cw" 'hwl-wrap)
+(define-key gr-mode-map "\C-ch" 'hilit-rehighlight-buffer)
+(define-key gr-mode-map "\C-cp" 'hwl-narrow-to-pe)
+(define-key gr-mode-map "\C-ct" 'hwl-narrow-to-thread)
+(define-key gr-mode-map "\C-ce" 'hwl-narrow-to-event)
+(define-key gr-mode-map "\C-c\C-e" '(lambda () (hwl-narrow-to-event "END")))
+(define-key gr-mode-map "\C-c " 'hwl-toggle-truncate-wrap)
+(define-key gr-mode-map "\C-cN" 'hwl-hi-node)
+(define-key gr-mode-map "\C-cT" 'hwl-hi-thread)
+(define-key gr-mode-map "\C-c\C-c" 'hwl-hi-clear)
+
+;; ---------------------------------------------------------------------------
+;; Mode for threaded C files
+;; ---------------------------------------------------------------------------
+
+(setq auto-mode-alist
+ (append '(("\\.hc" . hc-mode))
+ auto-mode-alist))
+
+(define-derived-mode hc-mode c-mode "hc Mode"
+ "Derived mode for Haskell C files."
+)
+
+(hilit-set-mode-patterns
+ 'hc-mode
+ '(
+ ("\\(GRAN_FETCH\\|GRAN_RESCHEDULE\\|GRAN_FETCH_AND_RESCHEDULE\\|GRAN_EXEC\\|GRAN_YIELD\\)" 1 keyword)
+ ("FB_" nil defun)
+ ("FE_" nil define)
+ ("__STG_SPLIT_MARKER" nil msg-note)
+ ("^.*_ITBL.*$" nil defun)
+ ("^\\(I\\|E\\|\\)FN.*$" nil define)
+ )
+)
+
+; (define-key global-map [S-pause] 'hc-mode)
diff --git a/ghc/utils/parallel/Jmakefile b/ghc/utils/parallel/Jmakefile
index 371785c667..3967047e52 100644
--- a/ghc/utils/parallel/Jmakefile
+++ b/ghc/utils/parallel/Jmakefile
@@ -2,7 +2,25 @@ PROGRAMS = grs2gr \
gr2ps \
gr2qp \
qp2ps \
- ghc-fool-sort ghc-unfool-sort
+ ghc-fool-sort ghc-unfool-sort \
+ gr2pe \
+ gr2java \
+ gr2jv \
+ gr2ap \
+ qp2ap \
+ gr2gran \
+ gr2RTS \
+ RTS2gran \
+ gran-extr \
+ gp-ext-imp \
+ tf \
+ avg-RTS \
+ SPLIT \
+ AVG \
+ SN \
+ get_SN \
+ sn_filter \
+ ps-scale-y
all:: $(PROGRAMS)
/* stuff to have before we get going */
@@ -17,6 +35,27 @@ MsubProgramScriptTarget(PerlCmd,qp2ps,qp2ps.pl,,)
MsubProgramScriptTarget(PerlCmd,ghc-fool-sort,ghc-fool-sort.pl,,)
MsubProgramScriptTarget(PerlCmd,ghc-unfool-sort,ghc-unfool-sort.pl,,)
+MsubProgramScriptTarget(PerlCmd,gr2pe,gr2pe.pl,,)
+MsubProgramScriptTarget(PerlCmd,gr2java,gr2java.pl,,)
+MsubProgramScriptTarget(/usr/local/bin/bash,gr2jv,gr2jv.bash,,)
+MsubProgramScriptTarget(/usr/local/bin/bash,gr2ap,gr2ap.bash,,)
+MsubProgramScriptTarget(PerlCmd,qp2ap,qp2ap.pl,,)
+
+MsubProgramScriptTarget(/usr/local/bin/bash,gr2gran,gr2gran.bash,,)
+MsubProgramScriptTarget(PerlCmd,gr2RTS,gr2RTS.pl,,)
+MsubProgramScriptTarget(PerlCmd,RTS2gran,RTS2gran.pl,,)
+MsubProgramScriptTarget(PerlCmd,gran-extr,gran-extr.pl,,)
+
+MsubProgramScriptTarget(PerlCmd,gp-ext-imp,gp-ext-imp.pl,,)
+MsubProgramScriptTarget(PerlCmd,tf,tf.pl,,)
+MsubProgramScriptTarget(PerlCmd,avg-RTS,avg-RTS.pl,,)
+MsubProgramScriptTarget(PerlCmd,SPLIT,SPLIT.pl,,)
+MsubProgramScriptTarget(PerlCmd,AVG,AVG.pl,,)
+MsubProgramScriptTarget(PerlCmd,SN,SN.pl,,)
+MsubProgramScriptTarget(PerlCmd,get_SN,get_SN.pl,,)
+MsubProgramScriptTarget(PerlCmd,sn_filter,sn_filter.pl,,)
+MsubProgramScriptTarget(PerlCmd,ps-scale-y,ps-scale-y.pl,,)
+
/* === INSTALLATION ======== */
/* the rest of these vary from std/useful to hackish dans le extreme */
@@ -29,6 +68,27 @@ InstallScriptTarget(qp2ps, $(INSTSCRIPTDIR))
InstallScriptTarget(ghc-fool-sort, $(INSTSCRIPTDIR))
InstallScriptTarget(ghc-unfool-sort,$(INSTSCRIPTDIR))
+InstallScriptTarget(gr2pe, $(INSTSCRIPTDIR))
+InstallScriptTarget(gr2java, $(INSTSCRIPTDIR))
+InstallScriptTarget(gr2jv, $(INSTSCRIPTDIR))
+InstallScriptTarget(gr2ap, $(INSTSCRIPTDIR))
+InstallScriptTarget(qp2ap, $(INSTSCRIPTDIR))
+
+InstallScriptTarget(gr2gran, $(INSTSCRIPTDIR))
+InstallScriptTarget(gr2RTS, $(INSTSCRIPTDIR))
+InstallScriptTarget(RTS2gran, $(INSTSCRIPTDIR))
+InstallScriptTarget(gran-extr, $(INSTSCRIPTDIR))
+
+InstallScriptTarget(gp-ext-imp, $(INSTSCRIPTDIR))
+InstallScriptTarget(tf, $(INSTSCRIPTDIR))
+InstallScriptTarget(avg-RTS, $(INSTSCRIPTDIR))
+InstallScriptTarget(SPLIT, $(INSTSCRIPTDIR))
+InstallScriptTarget(AVG, $(INSTSCRIPTDIR))
+InstallScriptTarget(SN, $(INSTSCRIPTDIR))
+InstallScriptTarget(get_SN, $(INSTSCRIPTDIR))
+InstallScriptTarget(sn_filter, $(INSTSCRIPTDIR))
+InstallScriptTarget(ps-scale-y, $(INSTSCRIPTDIR))
+
/* === OTHER STUFF ========= */
ExtraStuffToClean($(PROGRAMS))
diff --git a/ghc/utils/parallel/RTS2gran.pl b/ghc/utils/parallel/RTS2gran.pl
new file mode 100644
index 0000000000..32012afac8
--- /dev/null
+++ b/ghc/utils/parallel/RTS2gran.pl
@@ -0,0 +1,684 @@
+#!/usr/local/bin/perl
+##############################################################################
+# Time-stamp: <Mon May 20 1996 17:22:45 Stardate: [-31]7533.41 hwloidl>
+#
+# Usage: RTS2gran <RTS-file>
+#
+# Options:
+# -t <file> ... use <file> as template file (<,> global <.> local template)
+# -p <file> ... use <file> as gnuplot .gp file (default: gran.gp)
+# -x <x-size> ... of gnuplot graph
+# -y <y-size> ... of gnuplot graph
+# -n <n> ... use <n> as number of PEs in title
+# -h ... help; print this text.
+# -v ... verbose mode.
+#
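+# Example (prog.rts stands for a sorted runtime file, e.g. one produced by
+# gr2RTS): generate granularity and cumulative runtime graphs, taking the
+# interval boundaries from the template file TEMPL:
+#     RTS2gran -t TEMPL prog.rts
+#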
+##############################################################################
+
+# ----------------------------------------------------------------------------
+# Command line processing and initialization
+# ----------------------------------------------------------------------------
+
+$gran_dir = $ENV{'GRANDIR'};
+if ( $gran_dir eq "" ) {
+ print STDERR "RTS2gran: Warning: Env variable GRANDIR is undefined\n";
+}
+
+push(@INC, $gran_dir, $gran_dir . "/bin");
+# print STDERR "INC: " . join(':',@INC) . "\n";
+
+require "getopts.pl";
+require "template.pl"; # contains read_template for parsing template file
+require "stats.pl"; # statistics package with corr and friends
+
+&Getopts('hvt:p:x:y:n:Y:Z:');
+
+$OPEN_INT = 1;
+$CLOSED_INT = 0;
+
+do process_options();
+
+if ( $opt_v ) {
+ do print_verbose_message ();
+}
+
+# ----------------------------------------------------------------------------
+# The real thing
+# ----------------------------------------------------------------------------
+
+$max_y = &pre_process($input);
+
+open(INPUT,"<$input") || die "Couldn't open input file $input";
+open(OUT_CUMU,">$cumulat_rts_file_name") || die "Couldn't open output file $cumulat_rts_file_name";
+open(OUT_CUMU0,">$cumulat0_rts_file_name") || die "Couldn't open output file $cumulat0_rts_file_name";
+
+#do skip_header();
+
+$tot_total_rt = 0;
+$tot_rt = 0;
+$count = 0;
+$last_rt = 0;
+$last_x = 0;
+$last_y = ($logscale{"'g'"} ne "") ? 1 : 0;
+
+$line_no = 0;
+while (<INPUT>) {
+ $line_no++;
+ next if /^--/; # Comment lines start with --
+ next if /^\s*$/; # Skip empty lines
+ $rt = $1 if /^(\d+)/;
+ $count++;
+
+  if ( $opt_D && $rt < $last_rt ) {
+    print STDERR "Error @ line $line_no: RTS file not sorted!\n";
+  }
+
+ #push(@all_rts,$rt);
+ $sum_rt += $rt;
+
+ $index = do get_index_open_int($rt,@exec_times);
+ $exec_class[$index]++;
+
+ if ( $last_rt != $rt ) {
+    print OUT_CUMU "$rt \t" . int(100*$last_y/$max_y) . "\n";
+    print OUT_CUMU0 "$rt \t$last_y\n";
+    print OUT_CUMU "$rt \t" . int(100*$count/$max_y) . "\n";
+    print OUT_CUMU0 "$rt \t$count\n";
+ $last_x = $rt;
+ $last_y = $count;
+ }
+
+ $last_rt = $rt;
+}
+print OUT_CUMU "$rt \t" . int(100*$last_y/$max_y) . "\n";
+print OUT_CUMU0 "$rt \t$last_y\n";
+print OUT_CUMU "$rt \t" . int(100*$count/$max_y) . "\n";
+print OUT_CUMU0 "$rt \t$count\n";
+
+close OUT_CUMU;
+close OUT_CUMU0;
+
+$tot_tasks = $count; # this is y-max in cumulat graph
+$max_rt = $rt; # this is x-max in cumulat graph
+
+$max_rt_class = &list_max(@exec_class);
+
+do write_data($gran_file_name, $OPEN_INT, $logscale{"'g'"}, $#exec_times+1,
+ @exec_times, @exec_class);
+
+# ----------------------------------------------------------------------------
+# Run GNUPLOT over the data files and create figures
+# ----------------------------------------------------------------------------
+
+do gnu_plotify($gp_file_name);
+
+# ----------------------------------------------------------------------------
+
+if ( $max_y != $tot_tasks ) {
+ if ( $pedantic ) {
+ die "ERROR: pre-processed number of tasks ($max_y) does not match computed one ($tot_tasks)\n";
+ } else {
+ print STDERR "Warning: pre-processed number of tasks ($max_y) does not match computed one ($tot_tasks)\n" if $opt_v;
+ }
+}
+
+exit 0;
+
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+# ToDo: Put these routines into a package of their own
+# ----------------------------------------------------------------------------
+# Basic Operations on the intervals
+# ----------------------------------------------------------------------------
+
+sub get_index_open_int {
+ local ($value,@list) = @_;
+ local ($index,$right);
+
+ # print "get_index: searching for index of" . $value;
+ # print " in " . join(':',@list);
+
+ $index = 0;
+ $right = $list[$index];
+ while ( ($value >= $right) && ($index < $#list) ) {
+ $index++;
+ $right = $list[$index];
+ }
+
+ return ( ($index == $#list) && ($value > $right) ) ? $index+1 : $index;
+}
+
+# ----------------------------------------------------------------------------
+
+sub get_index_closed_int {
+ local ($value,@list) = @_;
+ local ($index,$right);
+
+ if ( ($value < $list[0]) || ($value > $list[$#list]) ) {
+ return ( -1 );
+ }
+
+ $index = 0;
+ $left = $list[$index];
+ while ( ($left <= $value) && ($index < $#list) ) {
+ $index++;
+ $left = $list[$index];
+ }
+ return ( $index-1 );
+}
+
+# ----------------------------------------------------------------------------
+# Write operations
+# ----------------------------------------------------------------------------
+
+sub write_data {
+ local ($file_name, $open_int, $logaxes, $n, @rest) = @_;
+ local (@times) = splice(@rest,0,$n);
+ local (@class) = @rest;
+
+ open(GRAN,">$file_name") || die "Couldn't open file $file_name for output";
+
+ if ( $open_int == $OPEN_INT ) {
+
+ for ($i=0,
+ $left = ( index($logaxes,"x") != -1 ? int($times[0]/2) : 0 ),
+ $right = 0;
+ $i < $n;
+ $i++, $left = $right) {
+ $right = $times[$i];
+ print GRAN int(($left+$right)/2) . " " .
+ ($class[$i] eq "" ? "0" : $class[$i]) . "\n";
+ }
+ print GRAN $times[$n-1]+(($times[$n-1]-$times[$n-2])/2) . " " .
+ ($class[$n] eq "" ? "0" : $class[$n]) . "\n";
+
+ } else {
+
+    print GRAN ( (index($logaxes,"x") != -1) && ($times[0] == 0 ? int($times[1]/2) : ($times[1] + $times[0])/2 ) . " " . $class[0] . "\n");
+ for ($i=1; $i < $n-2; $i++) {
+ $left = $times[$i];
+ $right = $times[$i+1];
+ print(GRAN ($left+$right)/2 . " " .
+ ($class[$i] eq "" ? "0" : $class[$i]) . "\n");
+ }
+ print GRAN ($times[$n-1]+$times[$n-2])/2 . " " . $class[$n-2] if $n >= 2;
+ }
+
+ close(GRAN);
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_array {
+ local ($file_name,$n,@list) = @_;
+
+ open(FILE,">$file_name") || die "$file_name: $!";
+ for ($i=0; $i<=$#list; $i++) {
+ print FILE $i . " " . ( $list[$i] eq "" ? "0" : $list[$i] ) . "\n";
+ }
+
+ if ( $opt_D ) {
+ print "write_array: (" . join(", ",1 .. $#list) . ")\n for file $file_name returns: \n (0, $#list, &list_max(@list)\n";
+ }
+
+ return ( (0, $#list, &list_max(@list),
+ "(" . join(", ",1 .. $#list) . ")\n") );
+}
+
+# ----------------------------------------------------------------------------
+
+sub gnu_plotify {
+ local ($gp_file_name) = @_;
+
+ @open_xrange = &range($OPEN_INT,$logscale{"'g'"},@exec_times);
+
+ $exec_xtics = $opt_T ? &get_xtics($OPEN_INT,@exec_times) : "" ;
+
+ open(GP_FILE,">$gp_file_name") ||
+ die "Couldn't open gnuplot file $gp_file_name for output\n";
+
+ print GP_FILE "set term postscript \"Roman\" 20\n";
+ do write_gp_record(GP_FILE,
+ $gran_file_name, &dat2ps_name($gran_file_name),
+ "Granularity (pure exec. time)", "Number of threads",
+ $logscale{"'g'"},
+ @open_xrange,$max_rt_class,$exec_xtics);
+
+ do write_gp_lines_record(GP_FILE,
+ $cumulat_rts_file_name, &dat2ps_name($cumulat_rts_file_name),
+ "Cumulative pure exec. times","% of threads",
+ "",
+ $max_rt, 100, "");
+ # $xtics_cluster_rts as last arg?
+
+ do write_gp_lines_record(GP_FILE,
+ $cumulat0_rts_file_name, &dat2ps_name($cumulat0_rts_file_name),
+ "Cumulative pure exec. times","Number of threads",
+ $logscale{"'Cg'"},
+ $max_rt, $tot_tasks, "");
+ # $xtics_cluster_rts as last arg?
+
+ close GP_FILE;
+
+ print "Gnu plotting figures ...\n";
+ system "gnuplot $gp_file_name";
+
+ print "Extending thickness of impulses ...\n";
+ do gp_ext($gran_file_name);
+}
+
+# ----------------------------------------------------------------------------
+
+sub gp_ext {
+ local (@file_names) = @_;
+ local ($file_name);
+ local ($ps_file_name);
+ local ($prg);
+
+ #$prg = system "which gp-ext-imp";
+ #print " Using script $prg for impuls extension\n";
+ $prg = $ENV{GRANDIR} ? $ENV{GRANDIR} . "/bin/gp-ext-imp"
+ : $ENV{HOME} . "/bin/gp-ext-imp" ;
+ if ( $opt_v ) {
+ print " (using script $prg)\n";
+ }
+
+ foreach $file_name (@file_names) {
+ $ps_file_name = &dat2ps_name($file_name);
+ system "$prg -w $ext_size -g $gray " .
+ $ps_file_name . " " .
+ $ps_file_name . "2" ;
+ system "mv " . $ps_file_name . "2 " . $ps_file_name;
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_gp_record {
+ local ($file,$in_file,$out_file,$xlabel,$ylabel,$logaxes,
+ $xstart,$xend,$ymax,$xtics) = @_;
+
+ if ( $xstart >= $xend ) {
+    print ("WARNING: empty xrange [$xstart:$xend] changed to [$xstart:" . ($xstart+1) . "]\n") if ( $pedantic || $opt_v );
+ $xend = $xstart + 1;
+ }
+
+ if ( $ymax <=0 ) {
+ $ymax = 2;
+ print "WARNING: empty yrange changed to [0:$ymax]\n" if ( $pedantic || $opt_v );
+ }
+
+ $str = "set size " . $xsize . "," . $ysize . "\n" .
+ "set xlabel \"" . $xlabel . "\"\n" .
+ "set ylabel \"" . $ylabel . "\"\n" .
+ ($xstart eq "" ? ""
+ : "set xrange [" . int($xstart) .":" . int($xend) . "]\n") .
+ ($opt_Y ?
+ ("set yrange [" . (index($logaxes,"y") != -1 ? 1 : 0) . ":$opt_Y]\n") :
+ ($ymax eq "" ? ""
+ : "set yrange [" . (index($logaxes,"y") != -1 ? 1 : 0) .
+ ":" . &list_max(2,int($ymax+$ymax/5)) . "]\n")) .
+ ($xtics ne "" ? "set xtics $xtics" : "") .
+ "set tics out\n" .
+ "set border\n" .
+ ( $nPEs!=0 ? "set title \"$nPEs PEs\"\n" : "" ) .
+ "set nokey \n" .
+ "set nozeroaxis\n" .
+ "set format xy \"%8.8g\"\n" .
+ (index($logaxes,"x") != -1 ?
+ "set logscale x\n" :
+ "set nologscale x\n") .
+ (index($logaxes,"y") != -1 ?
+ "set logscale y\n" :
+ "set nologscale y\n") .
+ "set output \"" . $out_file . "\"\n" .
+ "plot \"" . $in_file . "\" with impulses\n\n";
+ print $file $str;
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_gp_lines_record {
+ local ($file,$in_file,$out_file,$xlabel,$ylabel,$logaxes,
+ $xend,$yend,$xtics) = @_;
+
+ local ($str);
+
+ $str = "set xlabel \"" . $xlabel . "\"\n" .
+ "set ylabel \"" . $ylabel . "\"\n" .
+ "set xrange [" . ( index($logaxes,"x") != -1 ? 1 : 0 ) . ":$xend]\n" .
+ "set yrange [" . ( index($logaxes,"y") != -1 ? 1 : 0 ) .
+ ($yend!=100 && $opt_Z ? ":$opt_Z]\n" : ":$yend]\n") .
+ "set border\n" .
+ "set nokey\n" .
+ ( $xtics ne "" ? "set xtics $xtics" : "" ) .
+ (index($logaxes,"x") != -1 ?
+ "set logscale x\n" :
+ "set nologscale x\n") .
+ (index($logaxes,"y") != -1 ?
+ "set logscale y\n" :
+ "set nologscale y\n") .
+ "set nozeroaxis\n" .
+ "set format xy \"%8.8g\"\n" .
+ "set output \"" . $out_file . "\"\n" .
+ "plot \"" . $in_file . "\" with lines\n\n";
+ print $file $str;
+}
+
+
+# ----------------------------------------------------------------------------
+
+sub write_gp_simple_record {
+ local ($file,$in_file,$out_file,$xlabel,$ylabel,$logaxes,
+ $xstart,$xend,$ymax,$xtics) = @_;
+
+ $str = "set size " . $xsize . "," . $ysize . "\n" .
+ "set xlabel \"" . $xlabel . "\"\n" .
+ "set ylabel \"" . $ylabel . "\"\n" .
+ ($xstart eq "" ? ""
+ : "set xrange [" . int($xstart) .":" . int($xend) . "]\n") .
+ ($ymax eq "" ? ""
+ : "set yrange [" . (index($logaxes,"y") != -1 ? 1 : 0) .
+ ":" . &list_max(2,int($ymax+$ymax/5)) . "]\n") .
+ ($xtics ne "" ? "set xtics $xtics" : "") .
+ "set border\n" .
+ "set nokey\n" .
+ "set tics out\n" .
+ "set nozeroaxis\n" .
+ "set format xy \"%8.8g\"\n" .
+ (index($logaxes,"x") != -1 ?
+ "set logscale x\n" :
+ "set nologscale x\n") .
+ (index($logaxes,"y") != -1 ?
+ "set logscale y\n" :
+ "set nologscale y\n") .
+ "set output \"" . $out_file . "\"\n" .
+ "plot \"" . $in_file . "\" with impulses\n\n";
+ print $file $str;
+}
+
+# ----------------------------------------------------------------------------
+
+sub range {
+ local ($open_int, $logaxes, @ints) = @_;
+
+ local ($range, $left_margin, $right_margin);
+
+ $range = $ints[$#ints]-$ints[0];
+ $left_margin = 0; # $range/10;
+ $right_margin = 0; # $range/10;
+
+ if ( $opt_D ) {
+ print "\n==> Range: logaxes are $logaxes i.e. " .
+ (index($logaxes,"x") != -1 ? "matches x axis\n"
+ : "DOESN'T match x axis\n");
+ }
+ if ( index($logaxes,"x") != -1 ) {
+ if ( $open_int == $OPEN_INT ) {
+ return ( ($ints[0]/2-$left_margin,
+ $ints[$#ints]+($ints[$#ints]-$ints[$#ints-1])/2+$right_margin) );
+ } else {
+ return ( ( &list_max(1,$ints[0]-$left_margin),
+ $ints[$#ints]+($ints[$#ints]-$ints[$#ints-1])/2+$right_margin) );
+ }
+ } else {
+ if ( $open_int == $OPEN_INT ) {
+ return ( ($ints[0]/2-$left_margin,
+ $ints[$#ints]+($ints[$#ints]-$ints[$#ints-1])/2+$right_margin) );
+ } else {
+ return ( ($ints[0]-$left_margin,
+ $ints[$#ints]+($ints[$#ints]-$ints[$#ints-1])/2+$right_margin) );
+ }
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+
+sub process_options {
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0)";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+
+ # system "cat $0 | awk 'BEGIN { n = 0; } \
+ # /^$/ { print n; \
+ # exit; } \
+ # { n++; }'"
+ exit ;
+ }
+
+ $input = $#ARGV == -1 ? "-" : $ARGV[0] ;
+
+ if ( $#ARGV != 0 ) {
+ #print "Usage: gran-extr [options] <sim-file>\n";
+ #print "Use -h option to get details\n";
+ #exit 1;
+
+ }
+
+ # Default settings:
+ $gp_file_name = "gran.gp";
+ $gran_file_name = "gran.dat";
+ $cumulat_rts_file_name = "cumu-rts.dat";
+ $cumulat0_rts_file_name = "cumu-rts0.dat";
+ $xsize = 1;
+ $ysize = 1;
+
+ if ( $opt_p ) {
+ $gp_file_name = $opt_p;
+ } else {
+ $gp_file_name = "gran.gp";
+ }
+
+ #if ( $opt_s ) {
+ # $gp_file_name =~ s|\.|${opt_s}.|;
+ # $gran_file_name =~ s|\.|${opt_s}.|;
+ # $cumulat_rts_file_name =~ s|\.|${opt_s}.|;
+ # $cumulat0_rts_file_name =~ s|\.|${opt_s}.|;
+ #}
+
+ if ( $opt_x ) {
+ $xsize = $opt_x;
+ } else {
+ $xsize = 1;
+ }
+
+ if ( $opt_y ) {
+ $ysize = $opt_y;
+ } else {
+ $ysize = 1;
+ }
+
+ if ( $opt_t ) {
+ do read_template($opt_t,$input);
+ }
+
+}
+
+# ----------------------------------------------------------------------------
+
+sub print_verbose_message {
+
+ print "-" x 70 . "\n";
+ print "Setup: \n";
+ print "-" x 70 . "\n";
+ print "\nFilenames: \n";
+ print " Input file: $input\n";
+ print " Gran files: $gran_file_name $gran_global_file_name $gran_local_file_name\n";
+ print " Comm files: $comm_file_name $comm_global_file_name $comm_local_file_name\n";
+ print " Sparked threads file: $spark_file_name $spark_local_file_name $spark_global_file_name\n";
+ print " Heap file: $ha_file_name\n";
+ print " GNUPLOT file name: $gp_file_name Correlation file name: $corr_file_name\n";
+ print " Cumulative RT file name: $cumulat_rts_file_name ($cumulat0_rts_file_name) \n Cumulative HA file name: $cumulat_has_file_name\n";
+ print " Cluster RT file name: $clust_rts_file_name \n Cluster HA file name: $clust_has_file_name\n";
+ print " Cumulative runtimes file name: $cumulat_rts_file_name\n";
+ print " Cumulative heap allocations file name $cumulat_has_file_name\n";
+ print " Cluster run times file name: $clust_rts_file_name\n";
+ print " Cluster heap allocations file name: $clust_has_file_name\n";
+ print " PE load file name: $pe_file_name\n";
+ print " Site size file name: $sn_file_name\n";
+ print "\nBoundaries: \n";
+ print " Gran boundaries: (" . join(',',@exec_times) . ")\n";
+ print " Comm boundaries: (" . join(',',@comm_percs) . ")\n";
+ print " Sparked threads boundaries: (" . join(',',@sparks) . ")\n";
+ print " Heap boundaries: (" . join(',',@has) .")\n";
+ print "\nOther pars: \n";
+ print " Left margin: $left_margin Right margin: $right_margin\n";
+ print " GP-extension: $ext_size GP xsize: $xsize GP ysize: $ysize\n";
+ print " Gray scale: $gray Smart x-tics is " . ($opt_T ? "ON" : "OFF") .
+ " Percentage y-axis is " . ($opt_P ? "ON" : "OFF") . "\n";
+ print " Log. scaling assoc list: ";
+ while (($key,$value) = each %logscale) {
+ print "$key: $value, ";
+ }
+ print "\n";
+ print " Active template file: $templ_file\n" if $opt_t;
+ print "-" x 70 . "\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub pre_process {
+ local ($file) = @_;
+
+ open(PIPE,"wc -l $input |") || die "Couldn't open pipe";
+
+ while (<PIPE>) {
+ if (/^\s*(\d+)/) {
+ $res = $1;
+ } else {
+ die "Error in pre-processing: Last line of $file does not match RTS!\n";
+ }
+ }
+ close(PIPE);
+
+ return ($res-1);
+}
+
+# ----------------------------------------------------------------------------
+
+
+# ----------------------------------------------------------------------------
+#
+# Old version (eventually delete it)
+# New version is in template.pl
+#
+# sub read_template {
+# local ($f);
+#
+# if ( $opt_v ) {
+# print "Reading template file $templ_file_name ...\n";
+# }
+#
+# ($f = ($input eq "-" ? "stdin" : $input)) =~ s/.rts//;
+#
+# open(TEMPLATE,"cat $templ_file_name | sed -e 's/\$0/$f/' |")
+# || die "Couldn't open file $templ_file_name";
+#
+# while (<TEMPLATE>) {
+# next if /^\s*$/ || /^--/;
+# if (/^\s*G[:,;.\s]+([^\n]+)$/) {
+# $list_str = $1;
+# $list_str =~ s/[\(\)\[\]]//g;
+# @exec_times = split(/[,;. ]+/, $list_str);
+# } elsif (/^\s*F[:,;.\s]+([^\n]+)$/) {
+# $list_str = $1;
+# $list_str =~ s/[\(\)\[\]]//g;
+# @fetch_times = split(/[,;. ]+/, $list_str);
+# } elsif (/^\s*A[:,;.\s]+([^\n]+)$/) {
+# $list_str = $1;
+# $list_str =~ s/[\(\)\[\]]//g;
+# @has = split(/[,;. ]+/, $list_str);
+# } elsif (/^\s*C[:,;.\s]+([^\n]+)$/) {
+# $list_str = $1;
+# $list_str =~ s/[\(\)\[\]]//g;
+# @comm_percs = split(/[,;. ]+/, $list_str);
+# } elsif (/^\s*S[:,;.\s]+([^\n]+)$/) {
+# $list_str = $1;
+# $list_str =~ s/[\(\)\[\]]//g;
+# @sparks = split(/[,;. ]+/, $list_str);
+# } elsif (/^\s*g[:,;.\s]+([\S]+)$/) {
+# ($gran_file_name,$gran_global_file_name, $gran_local_file_name) =
+# &mk_global_local_names($1);
+# } elsif (/^\s*f[:,;.\s]+([\S]+)$/) {
+# ($ft_file_name,$ft_global_file_name, $ft_local_file_name) =
+# &mk_global_local_names($1);
+# } elsif (/^\s*c[:,;.\s]+([\S]+)$/) {
+# ($comm_file_name, $comm_global_file_name, $comm_local_file_name) =
+# &mk_global_local_names($1);
+# } elsif (/^\s*s[:,;.\s]+([\S]+)$/) {
+# ($spark_file_name, $spark_global_file_name, $spark_local_file_name) =
+# &mk_global_local_names($1);
+# } elsif (/^\s*a[:,;.\s]+([\S]+)$/) {
+# ($ha_file_name, $ha_global_file_name, $ha_local_file_name) =
+# &mk_global_local_names($1);
+# } elsif (/^\s*p[:,;.\s]+([\S]+)$/) {
+# $gp_file_name = $1;
+# $ps_file_name = &dat2ps_name($gp_file_name);
+#
+# } elsif (/^\s*Xcorr[:,;.\s]+([\S]+)$/) {
+# $corr_file_name = $1;
+# } elsif (/^\s*Xcumulat-rts[:,;.\s]+([\S]+)$/) {
+# $cumulat_rts_file_name = $1;
+# ($cumulat0_rts_file_name = $1) =~ s/\./0./;
+# } elsif (/^\s*Xcumulat-has[:,;.\s]+([\S]+)$/) {
+# $cumulat_has_file_name = $1;
+# } elsif (/^\s*Xcumulat-fts[:,;.\s]+([\S]+)$/) {
+# $cumulat_fts_file_name = $1;
+# } elsif (/^\s*Xcumulat-cps[:,;.\s]+([\S]+)$/) {
+# $cumulat_cps_file_name = $1;
+# } elsif (/^\s*Xclust-rts[:,;.\s]+([\S]+)$/) {
+# $clust_rts_file_name = $1;
+# } elsif (/^\s*Xclust-has[:,;.\s]+([\S]+)$/) {
+# $clust_has_file_name = $1;
+# } elsif (/^\s*Xclust-fts[:,;.\s]+([\S]+)$/) {
+# $clust_fts_file_name = $1;
+# } elsif (/^\s*Xclust-cps[:,;.\s]+([\S]+)$/) {
+# $clust_cps_file_name = $1;
+# } elsif (/^\s*Xpe[:,;.\s]+([\S]+)$/) {
+# $pe_file_name = $1;
+# } elsif (/^\s*Xsn[:,;.\s]+([\S]+)$/) {
+# $sn_file_name = $1;
+#
+# } elsif (/^\s*XRTS[:,;.\s]+([\S]+)$/) {
+# $rts_file_name = $1;
+# } elsif (/^\s*XHAS[:,;.\s]+([\S]+)$/) {
+# $has_file_name = $1;
+# } elsif (/^\s*XFTS[:,;.\s]+([\S]+)$/) {
+# $fts_file_name = $1;
+# } elsif (/^\s*XLSPS[:,;.\s]+([\S]+)$/) {
+# $lsps_file_name = $1;
+# } elsif (/^\s*XGSPS[:,;.\s]+([\S]+)$/) {
+# $gsps_file_name = $1;
+# } elsif (/^\s*XCPS[:,;.\s]+([\S]+)$/) {
+# $cps_file_name = $1;
+# } elsif (/^\s*XCCPS[:,;.\s]+([\S]+)$/) {
+# $ccps_file_name = $1;
+#
+# } elsif (/^\s*\-[:,;.\s]+([\S]+)$/) {
+# $input = $1;
+# } elsif (/^\s*L[:,;\s]+(.*)$/) {
+# $str = $1;
+# %logscale = ('g',"xy",'a',"xy",'Cg',"xy",'Ca',"xy",'Yp',"y",'Ys',"y") , next if $str eq ".";
+# $str =~ s/[\(\)\[\]]//g;
+# %logscale = split(/[,;. ]+/, $str);
+# } elsif (/^\s*i[:,;.\s]+([\S]+)$/) {
+# $gray = $1;
+# } elsif (/^\s*k[:,;.\s]+([\S]+)$/) {
+# $no_of_clusters = $1;
+# } elsif (/^\s*e[:,;.\s]+([\S]+)$/) {
+# $ext_size = $1;
+# } elsif (/^\s*v.*$/) {
+# $verbose = 1;
+# } elsif (/^\s*T.*$/) {
+# $opt_T = 1;
+# }
+# }
+# close(TEMPLATE);
+# }
diff --git a/ghc/utils/parallel/SN.pl b/ghc/utils/parallel/SN.pl
new file mode 100644
index 0000000000..0711d687a5
--- /dev/null
+++ b/ghc/utils/parallel/SN.pl
@@ -0,0 +1,280 @@
+#!/usr/local/bin/perl
+# (C) Hans Wolfgang Loidl, November 1995
+#############################################################################
+# Time-stamp: <Sun Nov 5 1995 00:23:45 Stardate: [-31]6545.08 hwloidl>
+#
+# Usage: SN [options] <gr-file>
+#
+# Create a summary of spark names that occur in gr-file (only END events in
+# gr-file are necessary). Creates a gnuplot impulses graph (spark names by
+# number of threads) as summary.
+#
+# Options:
+# -h ... help; print this text.
+# -v ... verbose mode.
+#
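+# Example (prog.gr stands for any GrAnSim profile):
+#     SN -v prog.gr
+# writes the summary to prog-SN.sn, the gnuplot data and script to
+# prog-SN.dat and prog-SN.gp, and the final impulses graph to prog-SN.ps.
+#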
+#############################################################################
+
+$gran_dir = $ENV{'GRANDIR'};
+if ( $gran_dir eq "" ) {
+ print STDERR "SN: Warning: Env variable GRANDIR is undefined\n";
+}
+
+push(@INC, $gran_dir, $gran_dir . "/bin");
+# print STDERR "INC: " . join(':',@INC) . "\n";
+
+require "getopts.pl";
+require "aux.pl";
+require "stats.pl";
+
+&Getopts('hv');
+
+do process_options();
+
+if ( $opt_v ) { do print_verbose_message(); }
+
+# ---------------------------------------------------------------------------
+# Init
+# ---------------------------------------------------------------------------
+
+chop($date = `date`);
+chop($stardate = `stardate`);
+
+open (IN,"<$input") || die "$!: $input";
+$n = 0;
+$is_end=0;
+while (<IN>) {
+ $is_end = 1 if /END\s+(\w+).*SN\s+(\d+).*RT\s*(\d+)/;
+ next unless $is_end;
+ $n++;
+ $sn = $2;
+ $rt = $3;
+ #$sn_dec = hex($sn);
+ $num_sns{$sn}++;
+ $rts_sns{$sn} += $rt;
+ #do inc ($sn_dec);
+ $is_end=0;
+}
+close (IN);
+
+@sorted_keys=sort {$a<=>$b} keys(%num_sns);
+#$max_val=&list_max(@sorted_keys);
+
+open (SUM,">$summary") || die "$!: $summary";
+
+print SUM "# Generated by SN at $date $stardate\n";
+print SUM "# Input file: $input\n";
+print SUM "#" . "-"x77 . "\n";
+print SUM "Total number of threads: $n\n";
+print SUM "# Format: SN: Spark Site N: Number of threads AVG: average RT\n";
+# . "RTS: Sum of RTs ";
+
+foreach $k (@sorted_keys) {
+ $num = $num_sns{$k};
+ $rts = $rts_sns{$k};
+ $avg = $rts/$num;
+ #print SUM "SN: $k \tN: $num \tRTS: $rts \tAVG: $avg\n";
+ print SUM "$k \t$num \t$avg\n";
+}
+close (SUM);
+
+open (OUT,">$output") || die "$!: $output";
+print OUT "# Generated by SN at $date $stardate\n";
+print OUT "# Input file: $input\n";
+print OUT "#" . "-"x77 . "\n";
+
+$max_val=0;
+foreach $k (@sorted_keys) {
+ $num = $num_sns{$k};
+ $max_val = $num if $num > $max_val;
+ print OUT "$k\t$num\n";
+}
+close (OUT);
+
+do write_gp($gp_file,$ps_file);
+
+print "Gnu plotting figures ...\n";
+system "gnuplot $gp_file";
+
+print "Extending thickness of impulses ...\n";
+$ext_size = 100;
+$gray = 0.3;
+do gp_ext($ps_file);
+
+exit (0);
+
+# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+sub inc {
+ local ($sn) = @_;
+ local (@k);
+
+ @k = keys(%num_sns);
+ if ( &is_elem($sn, @k) ) {
+ $num_sns{$sn}++;
+ } else {
+ $num_sns{$sn} = 1;
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub is_elem {
+ local ($x,@list) = @_;
+ local ($found);
+
+ for ($found = 0, $y = shift(@list);
+ $#list == -1 || $found;
+ $found = ($x == $y), $y = shift(@list)) {}
+
+ return ($found);
+}
+
+# ----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+
+sub process_options {
+
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0): $!\n";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+ exit ;
+ }
+
+ if ( $opt_s ) {
+ $opt_s =~ s/[\(\)\[\]]//g;
+ @sparks = split(/[,;. ]+/, $opt_s);
+ } else {
+ @sparks = ( 3, 4, 5, 6, 7, 8, 11, 12, 13, 14, 15);
+ }
+
+ if ( $#ARGV != 0 ) {
+    print "Usage: $0 [options] <gr-file>\n";
+ print "Use -h option to get details\n";
+ exit 1;
+ }
+
+ $input = $ARGV[0];
+ ($ps_file = $input) =~ s/\.gr/-SN.ps/;
+ ($gp_file = $input) =~ s/\.gr/-SN.gp/;
+ ($summary = $input) =~ s/\.gr/-SN.sn/;
+
+ #($basename = $gr_file) =~ s/\.gr//;
+ #$rts_file = $basename . ".rts"; # "RTS";
+ #$gran_file = "g.ps"; # $basename . ".ps";
+ #$rts_file = $gr_file;
+ #$rts_file =~ s/\.gr/.rts/g;
+
+ if ( $opt_o ) {
+ $output = $opt_o;
+ } else {
+ ($output = $input) =~ s/\.gr/-SN.dat/;
+ }
+
+ if ( $opt_e ) {
+ $ext_size = $opt_e;
+ } else {
+ $ext_size = 100;
+ }
+
+ if ( $opt_i ) {
+ $gray = $opt_i;
+ } else {
+ $gray = 0;
+ }
+}
+
+# -----------------------------------------------------------------------------
+
+sub print_verbose_message {
+ print "Input: $input \tOutput: $output\n";
+}
+
+# -----------------------------------------------------------------------------
+
+# ToDo: Take these from a global module:
+
+# ----------------------------------------------------------------------------
+
+sub gp_ext {
+ local (@file_names) = @_;
+ local ($file_name);
+ local ($ps_file_name);
+ local ($prg);
+
+ #$prg = system "which gp-ext-imp";
+ #print " Using script $prg for impuls extension\n";
+ $prg = $ENV{GRANDIR} ? $ENV{GRANDIR} . "/bin/gp-ext-imp"
+ : $ENV{HOME} . "/bin/gp-ext-imp" ;
+ if ( $opt_v ) {
+ print " (using script $prg)\n";
+ }
+
+ foreach $file_name (@file_names) {
+ $ps_file_name = $file_name; # NB change to orig !!!!&dat2ps_name($file_name);
+ system "$prg -w $ext_size -g $gray " .
+ $ps_file_name . " " .
+ $ps_file_name . "2" ;
+ system "mv " . $ps_file_name . "2 " . $ps_file_name;
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_gp {
+ local ($gp_file,$ps_file) = @_;
+ local ($str);
+
+ $xsize = 1;
+ $ysize = 1;
+ $xlabel = "Spark sites";
+ $ylabel = "Number of threads";
+ $xstart = &list_min(@sorted_keys);
+ $xend = &list_max(@sorted_keys);
+ $ymax = $max_val;
+  $xtics = "";   # alternatively: "(" . join(',',@sorted_keys) . ")\n"
+ $in_file = $output;
+ $out_file = $ps_file;
+
+ open (GP,">$gp_file") || die "$!: $gp_file";
+ print GP "set term postscript \"Roman\" 20\n";
+
+ # identical to the part in write_gp_record of RTS2gran
+
+ $str = "set size " . $xsize . "," . $ysize . "\n" .
+ "set xlabel \"" . $xlabel . "\"\n" .
+ "set ylabel \"" . $ylabel . "\"\n" .
+ ($xstart eq "" ? ""
+ : "set xrange [" . int($xstart) .":" . int($xend) . "]\n") .
+ ($opt_Y ?
+ ("set yrange [" . (index($logaxes,"y") != -1 ? 1 : 0) . ":$opt_Y]\n") :
+ ($ymax eq "" ? ""
+ : "set yrange [" . (index($logaxes,"y") != -1 ? 1 : 0) .
+ ":" . &list_max(2,int($ymax+$ymax/5)) . "]\n")) .
+ ($xtics ne "" ? "set xtics $xtics" : "") .
+ "set tics out\n" .
+ "set border\n" .
+ ( $nPEs!=0 ? "set title \"$nPEs PEs\"\n" : "" ) .
+ "set nokey \n" .
+ "set nozeroaxis\n" .
+ "set format xy \"%8.8g\"\n" .
+ (index($logaxes,"x") != -1 ?
+ "set logscale x\n" :
+ "set nologscale x\n") .
+ (index($logaxes,"y") != -1 ?
+ "set logscale y\n" :
+ "set nologscale y\n") .
+ "set output \"" . $out_file . "\"\n" .
+ "plot \"" . $in_file . "\" with impulses\n\n";
+ print GP $str;
+ close (GP);
+}
+
+# ----------------------------------------------------------------------------
diff --git a/ghc/utils/parallel/SPLIT.pl b/ghc/utils/parallel/SPLIT.pl
new file mode 100644
index 0000000000..b4fe46f5b0
--- /dev/null
+++ b/ghc/utils/parallel/SPLIT.pl
@@ -0,0 +1,379 @@
+#!/usr/local/bin/perl
+# (C) Hans Wolfgang Loidl, July 1995
+#############################################################################
+# Time-stamp: <Thu Oct 26 1995 18:23:00 Stardate: [-31]6498.62 hwloidl>
+#
+# Usage: SPLIT [options] <gr-file>
+#
+# Generate a set of granularity graphs out of the GrAnSim profile <gr-file>.
+# The granularity graphs are put into subdirs of the structure:
+# <basename of gr-file>-<spark-name>
+#
+# Options:
+# -s <list> ... a perl list of spark names; the given <gr-file> is scanned
+# for each given name in turn and granularity graphs are
+# generated for each of these sparks
+# -O ... use gr2RTS and RTS2gran instead of gran-extr;
+# this generates fewer output files (only granularity graphs)
+# but should be faster and far less memory consuming
+# -d <dir> ... use <dir> as basename for the sub-directories
+# -o <file> ... use <file> as basename for the generated latex files;
+# the overall result is in <file>.ps
+# -t <file> ... use <file> as gran-extr type template file
+# ('.' for local template, ',' for global template)
+# -A         ... suppress generation of granularity profiles for the overall .gr file
+# -h ... help; print this text.
+# -v ... verbose mode.
+#
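+# Example (prog.gr stands for any GrAnSim profile): generate granularity
+# graphs for spark sites 3 and 4, using the faster gr2RTS/RTS2gran route:
+#     SPLIT -O -s '(3,4)' prog.gr
+# The results are put into the subdirectories prog-3, prog-4 and (as -A is
+# not given) prog-all.
+#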
+#############################################################################
+
+require "getopts.pl";
+
+&Getopts('hvOAd:o:s:t:');
+
+do process_options();
+
+if ( $opt_v ) { do print_verbose_message(); }
+
+# ---------------------------------------------------------------------------
+# Init
+# ---------------------------------------------------------------------------
+
+$latex = "/usr/local/tex/bin/latex2e"; # or "/usr/local/tex/bin/latex2e"
+
+do all() if !$opt_A;
+
+foreach $s (@sparks) {
+ if ( -f $tmp_file ) { system "rm -f $tmp_file"; }
+  system("tf -H -s $s $gr_file > $tmp_file") == 0
+    || die "Command failed: tf -H -s $s $gr_file > $tmp_file\n";
+
+ if ( $opt_d ) {
+ $dir = $opt_d;
+ } else {
+ $dir = $gr_file;
+ }
+ $dir =~ s/\.gr//g;
+ $dir .= "-$s";
+
+ if ( ! -d $dir ) {
+    mkdir($dir,0755); # system "mkdir $dir";
+ system "chmod u+rwx $dir";
+ }
+
+ system "mv $tmp_file $dir/$gr_file";
+ chdir $dir;
+ do print_template();
+ do print_va("Title",$s);
+ if ( -f $va_ps_file ) {
+ local ($old) = $va_ps_file;
+ $old =~ s/\.ps/-o.ps/g;
+ system "mv $va_ps_file $old";
+ }
+ if ( $opt_O ) {
+ system "gr2RTS -o $rts_file $gr_file; " .
+ "RTS2gran -t $template_file $rts_file; " .
+ "$latex $va_file; dvips $va_dvi_file > $va_ps_file";
+ } else {
+ system "gran-extr -t $template_file $gr_file; " .
+ "$latex $va_file; dvips $va_dvi_file > $va_ps_file";
+ }
+ chdir ".."; # system "cd ..";
+}
+
+exit 0;
+
+# -----------------------------------------------------------------------------
+
+sub all {
+
+ $dir = $gr_file;
+ $dir =~ s/\.gr//g;
+ $dir .= "-all";
+
+ if ( ! -d $dir ) {
+    mkdir($dir,0755); # system "mkdir $dir";
+ system "chmod u+rwx $dir";
+ }
+
+ system "cp $gr_file $dir/$gr_file";
+ chdir $dir;
+ do print_template();
+ do print_va("All","all");
+ if ( -f $va_ps_file ) {
+ local ($old) = $va_ps_file;
+ $old =~ s/\.ps/-o.ps/g;
+ system "mv $va_ps_file $old";
+ }
+ if ( $opt_O ) {
+ system "gr2RTS -o $rts_file $gr_file; " .
+ "RTS2gran -t $template_file $rts_file; " .
+ "$latex $va_file; dvips $va_dvi_file > $va_ps_file";
+ } else {
+ system "gran-extr -t $template_file $gr_file; " .
+ "$latex $va_file; dvips $va_dvi_file > $va_ps_file";
+ }
+ chdir ".."; # system "cd ..";
+}
+
+# ---------------------------------------------------------------------------
+
+sub print_template {
+
+ open (TEMPL,">$template_file") || die "Can't open $template_file\n";
+
+ print TEMPL <<EOF;
+-- Originally copied from the master template: GrAn/bin/TEMPL
+-- Intervals for pure exec. times
+G: (1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 100000, 200000, 300000)
+-- Intervals for communication (i.e. fetch) times
+F: (1000, 2000, 3000, 4000, 5000, 10000, 20000, 30000, 40000, 50000, 100000, 200000, 300000)
+-- Intervals for communication percentages
+C: (0, 1, 2, 5, 8, 10, 20, 30, 40, 50, 100)
+-- Intervals for no. of sparks
+S: (1, 2, 5)
+-- Intervals for heap allocations
+A: (10,20,30,40,50,100,200,300,400,500,1000,2000,3000)
+-- A: (100, 50000, 66000, 100000)
+
+
+g: g.dat
+f: f.dat
+c: c.dat
+s: s.dat
+a: a.dat
+
+-- Select file name corr coeff file
+Xcorr: CORR
+
+-- Select file names for GNUPLOT data files for cumulative runtime and
+-- cluster graphs
+Xcumulat-rts: cumu-rts.dat
+Xcumulat-fts: cumu-fts.dat
+Xcumulat-has: cumu-has.dat
+Xcumulat-cps: cumu-cps.dat
+Xclust-rts: clust-rts.dat
+Xclust-has: clust-has.dat
+Xclust-cps: clust-cps.dat
+
+-- Select file names for GNUPLOT data files for per proc. runnable time
+-- and per spark site runtime
+Xpe: pe.dat
+Xsn: sn.dat
+
+-- Select file names for sorted lists of runtimes, heap allocs, number of
+-- local and global sparks and communication percentage
+XRTS: RTS
+XFTS: FTS
+XHAS: HAS
+XLSPS: LSPS
+XGSPS: GSPS
+XCPS: CPS
+XCCPS: CPS
+
+-- Std log scaling
+L: .
+-- ('g',"xy",'Cg',"xy",'Ca',"xy")
+
+-- Gray level of impulses in the graph (0=black)
+i: 0.3
+
+-- Number of clusters
+k: 2
+
+-- Width of impulses (needed for gp-ext-imp)
+e: 150
+
+-- Input file
+-- -: soda.gr
+EOF
+
+ close(TEMPL);
+}
+
+# -----------------------------------------------------------------------------
+# NB: different file must be generated for $opt_O and default setup.
+# -----------------------------------------------------------------------------
+
+sub print_va {
+ local ($title, $spark) = @_;
+
+ open (VA,">$va_file") || die "Can't open $va_file\n";
+
+ if ( $opt_O ) {
+ print VA <<EOF;
+% Originally copied from master va-file: grasp/tests/va.tex
+\\documentstyle[11pt,psfig]{article}
+
+% Page Format
+\\topmargin=0cm %0.5cm
+\\textheight=24cm %22cm
+\\footskip=0cm
+\\oddsidemargin=0cm %0.75cm
+\\evensidemargin=0cm %0.75cm
+\\rightmargin=0cm %0.75cm
+\\leftmargin=0cm %0.75cm
+\\textwidth=16cm %14.5cm
+
+\\title{SPLIT}
+\\author{Me}
+\\date{Today}
+
+\\pssilent
+
+\\begin{document}
+
+\\pagestyle{empty}
+\%\\maketitle
+
+\\nopagebreak
+
+\\begin{figure}[t]
+\\begin{center}
+\\begin{tabular}{c}
+\\centerline{\\psfig{angle=270,width=7cm,file=$gran_file}}
+\\end{tabular}
+\\end{center}
+\\caption{Granularity {\\bf $spark}}
+\\end{figure}
+
+\\begin{figure}[t]
+\\begin{center}
+\\begin{tabular}{cc}
+\\psfig{angle=270,width=7cm,file=cumu-rts.ps} &
+\\psfig{angle=270,width=7cm,file=cumu-rts0.ps}
+\\end{tabular}
+\\end{center}
+\\caption{Cumulative Execution Times {\\bf $spark}}
+\\end{figure}
+
+\\end{document}
+EOF
+ } else {
+ print VA <<EOF;
+% Originally copied from master va-file: grasp/tests/va.tex
+\\documentstyle[11pt,psfig]{article}
+
+% Page Format
+\\topmargin=0cm %0.5cm
+\\textheight=24cm %22cm
+\\footskip=0cm
+\\oddsidemargin=0cm %0.75cm
+\\evensidemargin=0cm %0.75cm
+\\rightmargin=0cm %0.75cm
+\\leftmargin=0cm %0.75cm
+\\textwidth=16cm %14.5cm
+
+\\title{$title; Spark: $spark}
+\\author{}
+\\date{}
+
+\\begin{document}
+
+\\pagestyle{empty}
+%\\maketitle
+
+\\nopagebreak
+
+\\begin{figure}[t]
+\\begin{center}
+\\begin{tabular}{cc}
+\\psfig{angle=270,width=7cm,file=$gran_file} &
+\\psfig{angle=270,width=7cm,file=a.ps}
+\\end{tabular}
+\\end{center}
+\\caption{Granularity \\& Heap Allocations {\\bf $spark}}
+\\end{figure}
+
+\\begin{figure}[t]
+\\begin{center}
+\\begin{tabular}{cc}
+\\psfig{angle=270,width=7cm,file=f.ps} &
+\\psfig{angle=270,width=7cm,file=c.ps}
+\\end{tabular}
+\\end{center}
+\\caption{Fetching Profile {\\bf $spark}}
+\\end{figure}
+
+\\begin{figure}[t]
+\\begin{center}
+\\begin{tabular}{cc}
+\\psfig{angle=270,width=7cm,file=cumu-rts.ps} &
+\\psfig{angle=270,width=7cm,file=cumu-rts0.ps}
+\\end{tabular}
+\\end{center}
+\\caption{Cumulative Execution Times {\\bf $spark}}
+\\end{figure}
+
+\\end{document}
+EOF
+}
+ close (VA);
+}
+
+# -----------------------------------------------------------------------------
+
+sub process_options {
+
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0): $!\n";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+ exit ;
+ }
+
+ if ( $opt_s ) {
+ $opt_s =~ s/[\(\)\[\]]//g;
+ @sparks = split(/[,;. ]+/, $opt_s);
+ } else {
+ @sparks = ( 3, 4, 5, 6, 7, 8, 11, 12, 13, 14, 15);
+ }
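+  # For example, -s "(3,4,5)" and -s "3 4 5" both yield @sparks = (3,4,5):
+  # surrounding brackets are stripped and any of ",;. " separates the sites.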
+
+ if ( $#ARGV != 0 ) {
+    print "Usage: $0 [options] <gr-file>\n";
+ print "Use -h option to get details\n";
+ exit 1;
+ }
+
+ $gr_file = $ARGV[0];
+ ($basename = $gr_file) =~ s/\.gr//;
+ $rts_file = $basename . ".rts"; # "RTS";
+ $gran_file = "g.ps"; # $basename . ".ps";
+ #$rts_file = $gr_file;
+ #$rts_file =~ s/\.gr/.rts/g;
+
+ if ( $opt_o ) {
+ $va_file = $opt_o;
+ $va_dvi_file = $va_file;
+ $va_dvi_file =~ s/\.tex/.dvi/g;
+ $va_ps_file = $va_file;
+ $va_ps_file =~ s/\.tex/.ps/g;
+ } else {
+ $va_file = "va.tex";
+ $va_dvi_file = "va.dvi";
+ $va_ps_file = "va.ps";
+ }
+
+ if ( $opt_t ) {
+ $template_file = $opt_t;
+ } else {
+ $template_file = "TEMPL";
+ }
+
+ $tmp_file = ",t";
+}
+
+# -----------------------------------------------------------------------------
+
+sub print_verbose_message {
+ print "Sparks: (" . join(',',@sparks) . ")\n";
+ print "Files: .gr " . $gr_file . " template " . $template_file .
+ " va " . $va_file . "\n";
+}
+
+# -----------------------------------------------------------------------------
diff --git a/ghc/utils/parallel/aux.pl b/ghc/utils/parallel/aux.pl
new file mode 100644
index 0000000000..8484057aab
--- /dev/null
+++ b/ghc/utils/parallel/aux.pl
@@ -0,0 +1,89 @@
+#!/usr/local/bin/perl
+##############################################################################
+# Time-stamp: <Sat Oct 28 1995 22:41:09 Stardate: [-31]6509.51 hwloidl>
+#
+# Usage: do ...
+#
+# Various auxiliary Perl subroutines that are mainly used in gran-extr and
+# RTS2gran.
+# This module contains the following `exported' routines:
+# - mk_global_local_names
+# - dat2ps_name
+# The following routines should be local:
+# - basename
+# - dirname
+#
+##############################################################################
+
+# ----------------------------------------------------------------------------
+# Usage: do mk_global_local_names (<file_name>);
+# Returns: (<file_name>,<local_file_name>, <global_file_name>)
+#
+# Take a filename and create names for local and global variants.
+# E.g.: foo.dat -> foo-local.dat and foo-global.dat
+# ----------------------------------------------------------------------------
+
+sub mk_global_local_names {
+ local ($file_name) = @_;
+
+ $file_name .= ".dat" unless $file_name =~ /\.dat$/;
+ $global_file_name = $file_name;
+ $global_file_name =~ s/\.dat/\-global\.dat/ ;
+ $local_file_name = $file_name;
+ $local_file_name =~ s/\.dat/\-local\.dat/ ;
+
+ return ( ($file_name, $global_file_name, $local_file_name) );
+}
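+
+# A minimal usage sketch ("rt" is only an illustrative name):
+#   ($dat, $glob, $loc) = do mk_global_local_names("rt");
+#   # yields ("rt.dat", "rt-global.dat", "rt-local.dat")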
+
+
+# ----------------------------------------------------------------------------
+# Usage: do dat2ps_name(<dat_file_name>);
+# Returns: (<ps_file_name>);
+# ----------------------------------------------------------------------------
+
+sub dat2ps_name {
+ local ($dat_name) = @_;
+
+ $dat_name =~ s/\.dat$/\.ps/;
+ return ($dat_name);
+}
+
+# ----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
+
+sub basename {
+ local ($in_str) = @_;
+ local ($str,$i) ;
+
+ $i = rindex($in_str,"/");
+ if ($i == -1) {
+ $str = $in_str;
+ } else {
+ $str = substr($in_str,$i+1) ;
+ }
+
+ return $str;
+}
+
+# ----------------------------------------------------------------------------
+
+sub dirname {
+ local ($in_str) = @_;
+ local ($str,$i) ;
+
+ $i = rindex($in_str,"/");
+ if ($i == -1) {
+ $str = "";
+ } else {
+ $str = substr($in_str,0,$i+1) ;
+ }
+
+ return $str;
+}
+
+# ----------------------------------------------------------------------------
+
+
+# ----------------------------------------------------------------------------
+
+1;
diff --git a/ghc/utils/parallel/avg-RTS.pl b/ghc/utils/parallel/avg-RTS.pl
new file mode 100644
index 0000000000..4f25d55f80
--- /dev/null
+++ b/ghc/utils/parallel/avg-RTS.pl
@@ -0,0 +1,15 @@
+#!/usr/local/bin/perl
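+#
+# Reads a list of runtimes, one per line (e.g. the sorted .rts file written by
+# gr2RTS), from stdin or the files given as arguments; lines not starting with
+# a digit are skipped. Prints the number of entries, their sum, the average,
+# and the last entry (the maximum, assuming the input is sorted).
+#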
+
+$n=0;
+$sum=0;
+$last=0;
+while (<>) {
+ next unless /^\d+/;
+ @c = split;
+ $sum += $c[0];
+ $last = $c[0];
+ $n++;
+}
+
+print "Average Runtimes: n=$n; sum=$sum; avg=" . ($sum/$n) . "; max=$last\n";
+
diff --git a/ghc/utils/parallel/get_SN.pl b/ghc/utils/parallel/get_SN.pl
new file mode 100644
index 0000000000..e9426855bf
--- /dev/null
+++ b/ghc/utils/parallel/get_SN.pl
@@ -0,0 +1,40 @@
+#!/usr/local/bin/perl
+#############################################################################
+
+#do get_SN($ARGV[0]);
+
+#exit 1;
+
+# ---------------------------------------------------------------------------
+
+sub get_SN {
+ local ($file) = @_;
+ local ($id,$idx,$sn);
+
+ open (FILE,$file) || die "get_SN: Can't open file $file\n";
+
+ $line_no=0;
+ while (<FILE>) {
+ next unless /END/;
+ # PE 0 [3326775]: END 0, SN 0, ST 0, EXP F, BB 194, HA 1464, RT 983079, BT 1449032 (7), FT 0 (0), LS 0, GS 27, MY T
+
+ if (/^PE\s*(\d+) \[(\d+)\]: END ([0-9a-fx]+), SN (\d+)/) {
+ $line_no++;
+ $idx = $3;
+ $id = hex($idx);
+ $sn = $4;
+ #print STDERR "Id: $id ($idx) --> $sn\n";
+ $id2sn{$id} = $sn;
+ }
+ }
+
+ # print STDERR "get_SN: $line_no lines processed\n";
+ close (FILE);
+
+ # print STDERR "Summary: " . "="x15 . "\n";
+ # foreach $key (keys %id2sn) {
+ # print STDERR "> $key --> $id2sn{$key}\n";
+ #}
+}
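+
+# For the sample END line quoted above ("... END 0, SN 0, ..."), this records
+# $id2sn{0} = 0: each thread id (the hex END field) is mapped to its spark
+# site number (the SN field). Scripts that `require' this file presumably
+# read the global %id2sn afterwards.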
+
+1;
diff --git a/ghc/utils/parallel/gp-ext-imp.pl b/ghc/utils/parallel/gp-ext-imp.pl
new file mode 100644
index 0000000000..fa7c4e06d8
--- /dev/null
+++ b/ghc/utils/parallel/gp-ext-imp.pl
@@ -0,0 +1,86 @@
+#!/usr/local/bin/perl
+# #############################################################################
+#
+# Usage: gp-ext-imp [options] [<input-file>] [<output-file>]
+#
+# A small script to produce half-useful bar graphs from the PostScript
+# output produced by gnuplot.
+# Translation is done in the X axis automatically, and should
+# Translation along the X axis is done automatically, and should
+#
+# Original version: Bryan O'Sullivan <bos@dcs.glasgow.ac.uk> 09.94
+# New and improved version: Hans Wolfgang Loidl <hwloidl@dcs.glasgow.ac.uk>
+#
+# Options:
+# -w <width> ... width of vertical bars
+# -g <gray-level> ... set gray-level (between 0 and 1; 0 means black)
+# -m <move> ... move the graph <move> pixels to the right
+# -h ... help; print this text
+# -v ... verbose mode
+#
+# #############################################################################
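+#
+# Example (file names are only an illustration): post-process a gnuplot
+# PostScript file so that the bars are wider, mid-gray and shifted right:
+#   gp-ext-imp -w 300 -g 0.5 -m 150 gran.ps gran-bars.ps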
+
+require "getopts.pl";
+
+&Getopts('hvm:w:g:');
+
+if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0)";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+
+ exit ;
+}
+
+$size = $opt_w ? $opt_w : 200;
+$gray = $opt_g ? $opt_g : 0;
+$move = $opt_m ? $opt_m : 150;
+
+$from = $#ARGV >= 0 ? $ARGV[0] : "-";
+$to = $#ARGV >= 1 ? $ARGV[1] : "-";
+
+if ( $opt_v ) {
+  print "-" x 70 . "\n";
+ print "\nSetup: \n";
+ print " Input file: $from Output file: $to\n";
+ print " Width: $size Gray level: $gray Move is " .
+ ($opt_m ? "ON" : "OFF") . " with value $move\n";
+  print "-" x 70 . "\n";
+}
+
+open(FROM, "<$from") || die "$from: $!";
+open(TO, ">$to") || die "$to: $!";
+
+$l = -1;
+
+foreach (<FROM>) {
+ if ($l >= 0) {
+ $l--;
+ }
+ if ($l == 0) {
+ if ( $opt_m ) {
+ # This seems to shift everything a little to the right;
+ print TO "$move 0 translate\n";
+ }
+ print TO "$gray setgray\n";
+ print TO "$size setlinewidth\n";
+ }
+ if (/^LT0$/) {
+ $l = 3;
+ } elsif (/^LT1$/) {
+ print TO "-150 0 translate\n";
+ }
+ print TO;
+}
+
+
+
+
+
+
+
diff --git a/ghc/utils/parallel/gr2RTS.pl b/ghc/utils/parallel/gr2RTS.pl
new file mode 100644
index 0000000000..c609334c28
--- /dev/null
+++ b/ghc/utils/parallel/gr2RTS.pl
@@ -0,0 +1,138 @@
+#!/usr/local/bin/perl
+# (C) Hans Wolfgang Loidl, July 1995
+##############################################################################
+# Time-stamp: <Thu Oct 26 1995 18:40:10 Stardate: [-31]6498.68 hwloidl>
+#
+# Usage: gr2RTS [options] <sim-file>
+#
+# Options:
+# -o <file> ... write output to <file>
+# -h ... help; print this text.
+# -v ... verbose mode.
+#
+##############################################################################
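+#
+# Example (file names are only an illustration):
+#   gr2RTS -o soda.rts soda.gr    # writes the sorted runtimes to soda.rts
+# Without -o the output name is derived from the input (.gr -> .rts); when
+# reading from stdin the script acts as a filter and writes to stdout.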
+
+# ----------------------------------------------------------------------------
+# Command line processing and initialization
+# ----------------------------------------------------------------------------
+
+require "getopts.pl";
+
+&Getopts('hvo:');
+
+do process_options();
+
+if ( $opt_v ) {
+ do print_verbose_message ();
+}
+
+# ----------------------------------------------------------------------------
+# The real thing
+# ----------------------------------------------------------------------------
+
+open(INPUT,"<$input") || die "Couldn't open input file $input";
+open(OUTPUT,"| sort -n > $output") || die "Couldn't open output file $output";
+
+#do skip_header();
+
+$tot_total_rt = 0;
+$tot_rt = 0;
+
+$line_no = 0;
+while (<INPUT>) {
+ next if /^--/; # Comment lines start with --
+ next if /^\s*$/; # Skip empty lines
+ $line_no++;
+ @fields = split(/[:,]/,$_);
+ $has_end = 0;
+
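+  # A relevant line looks roughly like (cf. the sample quoted in get_SN.pl):
+  #   PE 0 [3326775]: END 0, SN 0, ST 0, EXP F, BB 194, HA 1464, RT 983079, ...
+  # The loop below picks the individual fields out of the colon/comma split.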
+ foreach $elem (@fields) {
+ foo : {
+ $pe = $1, $end = $2 , last foo if $elem =~ /^\s*PE\s+(\d+)\s+\[(\d+)\].*$/;
+ $tn = $1, $has_end = 1 , last foo if $elem =~ /^\s*END\s+(\w+).*$/;
+ # $tn = $1 , last foo if $elem =~ /^\s*TN\s+(\w+).*$/;
+ $sn = $1 , last foo if $elem =~ /^\s*SN\s+(\d+).*$/;
+ $start = $1 , last foo if $elem =~ /^\s*ST\s+(\d+).*$/;
+ $is_global = $1 , last foo if $elem =~ /^\s*EXP\s+(T|F).*$/;
+ $bbs = $1 , last foo if $elem =~ /^\s*BB\s+(\d+).*$/;
+ $ha = $1 , last foo if $elem =~ /^\s*HA\s+(\d+).*$/;
+ $rt = $1 , last foo if $elem =~ /^\s*RT\s+(\d+).*$/;
+ $bt = $1, $bc = $2 , last foo if $elem =~ /^\s*BT\s+(\d+)\s+\((\d+)\).*$/;
+ $ft = $1, $fc = $2 , last foo if $elem =~ /^\s*FT\s+(\d+)\s+\((\d+)\).*$/;
+ $lsp = $1 , last foo if $elem =~ /^\s*LS\s+(\d+).*$/;
+ $gsp = $1 , last foo if $elem =~ /^\s*GS\s+(\d+).*$/;
+ $my = $1 , last foo if $elem =~ /^\s*MY\s+(T|F).*$/;
+ }
+ }
+
+ next unless $has_end == 1;
+
+ $total_rt = $end - $start;
+ $tot_total_rt += $total_rt;
+ $tot_rt += $rt;
+
+ print OUTPUT "$rt\n";
+ $sum_rt += $rt;
+ $max_rt = $rt if $rt > $max_rt;
+}
+
+close INPUT;
+close OUTPUT;
+
+# Hack to fake a filter
+if ( $output eq $filter_output ) {
+ system "cat $output";
+ system "rm $output";
+}
+
+exit 0;
+
+# ---------------------------------------------------------------------------
+
+sub process_options {
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0)";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+
+ # system "cat $0 | awk 'BEGIN { n = 0; } \
+ # /^$/ { print n; \
+ # exit; } \
+ # { n++; }'"
+ exit ;
+ }
+
+ $input = $#ARGV == -1 ? "-" : $ARGV[0] ;
+
+ if ( $#ARGV != 0 ) {
+ #print "Usage: gran-extr [options] <sim-file>\n";
+ #print "Use -h option to get details\n";
+ #exit 1;
+
+ }
+
+  $filter_output = ($ENV{'TMPDIR'} ? $ENV{'TMPDIR'} : ".") . "/,gr2RTS-out";
+ if ( $opt_o ) {
+ $output = $opt_o;
+ } else {
+ if ( $input eq "-" ) {
+ $output = $filter_output;
+ } else {
+ $output = $input; # "RTS";
+ $output =~ s/\.gr$/.rts/g;
+ } #
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub print_verbose_message {
+ print "Input file: $input\t Output file: $output\n";
+}
+
+# ----------------------------------------------------------------------------
diff --git a/ghc/utils/parallel/gr2ap.bash b/ghc/utils/parallel/gr2ap.bash
new file mode 100644
index 0000000000..7818fe112b
--- /dev/null
+++ b/ghc/utils/parallel/gr2ap.bash
@@ -0,0 +1,124 @@
+#!/usr/local/bin/bash
+##############################################################################
+# Time-stamp: <Wed Jul 24 1996 20:53:36 Stardate: [-31]7859.14 hwloidl>
+#
+# Usage: gr2ap [options] <gr-file>
+#
+# Create a per-thread activity graph from a GrAnSim (or GUM) profile.
+# Transform the log file of a GrAnSim run (a .gr file) into a quasi-parallel
+# profile (a .qp file) using gr2qp and then into a PostScript file using qp2ap.
+# The generated PostScript file shows one horizontal line for each task. The
+# thickness of the line indicates the state of the thread:
+# thick ... active, medium ... suspended, thin ... fetching remote data
+#
+# Options:
+# -o <file> ... write .ps file to <file>
+#  -m      ... create mono PostScript file instead of a color one.
+# -O ... optimise i.e. try to minimise the size of the .ps file.
+# -v ... be talkative.
+# -h ... print help message (this header).
+#
+##############################################################################
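+#
+# Example (the profile name is only an illustration):
+#   gr2ap -O -o soda_ap.ps soda.gr
+# runs gr2qp over soda.gr and pipes the result through qp2ap, leaving the
+# per-thread activity graph in soda_ap.ps.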
+
+progname="`basename $0`"
+args="$*"
+
+verb=0
+help=0
+mono=""
+apfile=""
+optimise=""
+scale=""
+width=""
+
+getopts "hvmo:s:w:OD" name
+while [ "$name" != "?" ] ; do
+ case $name in
+ h) help=1;;
+ v) verb=1;;
+ m) mono="-m";;
+ o) apfile="$OPTARG";;
+ s) scale="-s $OPTARG";;
+ w) width="-w $OPTARG";;
+ O) optimise="-O";;
+ D) debug="-D";;
+ esac
+ getopts "hvmo:s:w:OD" name
+done
+
+opts="$mono $optimise $scale $width"
+
+shift $[ $OPTIND - 1 ]
+
+if [ $help -eq 1 ]
+ then no_of_lines=`cat $0 | awk 'BEGIN { n = 0; } \
+ /^$/ { print n; \
+ exit; } \
+ { n++; }'`
+ echo "`head -$no_of_lines $0`"
+ exit
+fi
+
+
+if [ -z "$1" ]
+ then echo "Usage: $progname [options] file[.gr]"
+ echo "Use -h option for details"
+ exit 1;
+fi
+
+f="`basename $1 .gr`"
+grfile="$f".gr
+qpfile="${TMPDIR:-.}/$f".qp
+ppfile="${TMPDIR:-.}/$f".pp
+
+if [ -z "$apfile" ]
+ then apfile="$f"_ap.ps
+fi
+
+if [ $verb -eq 1 ]
+ then echo "Input file: $grfile"
+ echo "Quasi-parallel file: $qpfile"
+ echo "PostScript file: $apfile"
+ echo "Options forwarded to qp2ap: $opts"
+ if [ "$mono" = "-m" ]
+ then echo "Producing monochrome PS file"
+ else echo "Producing color PS file"
+ fi
+ if [ "$debug" = "-D" ]
+ then echo "Debugging is turned ON"
+ else echo "Debugging is turned OFF"
+ fi
+fi
+
+
+# unset noclobber
+
+if [ ! -f "$grfile" ]
+ then
+ echo "$grfile does not exist"
+ exit 1
+ else
+ # rm -f "$qpfile" "$apfile"
+ prog=`head -1 "$grfile" | sed -e 's/Granularity Simulation for //'`
+ echo "$prog" >| "$qpfile"
+ if [ $verb -eq 1 ]
+ then echo "Executed program: $prog"
+ fi
+ date >> "$qpfile"
+ #date="`date`" # This is the date of running the script
+ date="`tail +2 $grfile | head -1 | sed -e 's/Start time: //'`"
+ cat "$grfile" | gr2qp >> "$qpfile"
+ # Sorting is part of gr2qp now.
+ # | ghc-fool-sort | sort -n +0 -1 | ghc-unfool-sort >> "$qpfile"
+ # max=`tail -2 "$qpfile" | awk '!/^Number of threads:/ { print $1; }'`
+ xmax=`tail -1 "$qpfile" | awk '{ print $2; }'`
+ ymax=`tail -1 "$qpfile" | awk '{ print $8; }'`
+ if [ $verb -eq 1 ]
+ then echo "Total runtime: $xmax"
+ echo "Total number of tasks: $ymax"
+ fi
+ tail +3 "$qpfile" | qp2ap $opts "$xmax" "$ymax" "$prog" "$date" >| "$apfile"
+ rm -f "$qpfile"
+ # Old: qp2ap.pl $mono $max "$prog" "$date" < "$qpfile" > "$apfile"
+fi
+
diff --git a/ghc/utils/parallel/gr2gran.bash b/ghc/utils/parallel/gr2gran.bash
new file mode 100644
index 0000000000..0db4dab604
--- /dev/null
+++ b/ghc/utils/parallel/gr2gran.bash
@@ -0,0 +1,113 @@
+#!/usr/local/bin/bash
+##############################################################################
+# Last modified: Time-stamp: <95/08/01 02:21:56 hwloidl>
+#
+# Usage: gr2gran [options] <sim-file>
+#
+# Create granularity graphs for the GrAnSim profile <sim-file>. This creates
+# a bucket statistics and a cumulative runtimes graph.
+# This script is derived from the much more complex gran-extr script, which
+# also produces such graphs and much more information, too.
+#
+# Options:
+# -t <file> ... use <file> as template file (<,> global <.> local template)
+# -p <file> ... use <file> as gnuplot .gp file (default: gran.gp)
+# -x <x-size> ... of gnuplot graph
+# -y <y-size> ... of gnuplot graph
+# -n <n> ... use <n> as number of PEs in title
+# -o <file> ... keep the intermediate <file> (sorted list of all runtimes)
+# -h ... help; print this text.
+# -v ... verbose mode.
+#
+##############################################################################
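+#
+# Example (file names and the PE count are only an illustration):
+#   gr2gran -n 32 -o soda.rts soda.gr
+# runs gr2RTS and RTS2gran over soda.gr and keeps the intermediate soda.rts
+# (because of -o).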
+
+progname="`basename $0`"
+args="$*"
+
+help=0
+verb=0
+template=""
+plotfile=""
+x=""
+y=""
+n=""
+rtsfile=""
+keep_rts=0
+
+getopts "hvt:p:x:y:n:o:" name
+while [ "$name" != "?" ] ; do
+ case $name in
+ h) help=1;;
+ v) verb=1;;
+ t) template="-t $OPTARG";;
+ p) plotfile="-p $OPTARG";;
+ x) x="-x $OPTARG";;
+ y) y="-y $OPTARG";;
+ n) n="-n $OPTARG";;
+ o) rtsfile="$OPTARG";;
+ esac
+ getopts "hvt:p:x:y:n:o:" name
+done
+
+shift $[ $OPTIND - 1 ]
+
+if [ $help -eq 1 ]
+ then no_of_lines=`cat $0 | awk 'BEGIN { n = 0; } \
+ /^$/ { print n; \
+ exit; } \
+ { n++; }'`
+ echo "`head -$no_of_lines $0`"
+ exit
+fi
+
+if [ -z "$1" ]
+ then echo "Usage: $progname [options] file[.gr]"
+ echo "Use -h option for details"
+ exit 1;
+fi
+
+f="`basename $1 .gr`"
+grfile="${f}.gr"
+if [ -z "$rtsfile" ]
+ then rtsfile="${f}.rts"
+ rtsopt="-o $rtsfile"
+ else rtsopt="-o $rtsfile"
+ keep_rts=1
+fi
+
+opts_RTS="$rtsopt "
+opts_ps="$template $plotfile $x $y $n "
+
+if [ $verb -eq 1 ]
+ then echo "Input file: $grfile"
+ if [ ${keep_rts} -eq 1 ]
+ then echo "Intermediate file: $rtsfile (kept after termination)"
+ else echo "Intermediate file: $rtsfile (discarded at end)"
+ fi
+ verb_opt="-v "
+ opts_RTS="${opts_RTS} $verb_opt "
+ opts_ps="${opts_ps} $verb_opt "
+ echo "Options for gr2RTS: ${opts_RTS}"
+ echo "Options for RTS2gran: ${opts_ps}"
+fi
+
+
+# unset noclobber
+if [ ! -f "$grfile" ]
+ then
+ echo "$grfile does not exist"
+ exit 1
+ else
+ # rm -f "$rtsfile"
+ if [ $verb -eq 1 ]
+ then echo "gr2RTS ..."
+ fi
+ gr2RTS ${opts_RTS} $grfile
+ if [ $verb -eq 1 ]
+ then echo "RTS2gran ..."
+ fi
+ RTS2gran ${opts_ps} $rtsfile
+ if [ ${keep_rts} -ne 1 ]
+ then rm -f $rtsfile
+ fi
+fi \ No newline at end of file
diff --git a/ghc/utils/parallel/gr2java.pl b/ghc/utils/parallel/gr2java.pl
new file mode 100644
index 0000000000..acd0b5e631
--- /dev/null
+++ b/ghc/utils/parallel/gr2java.pl
@@ -0,0 +1,322 @@
+#!/usr/local/bin/perl
+##############################################################################
+#
+# Usage: gr2java [options]
+#
+# Filter that transforms a GrAnSim profile (a .gr file) at stdin to
+# a quasi-parallel profile (a .qp file). It is the common front-end for most
+# visualization tools (except gr2pe). It collects running,
+# runnable and blocked tasks in queues of different `colours', whose meaning
+# is:
+# G ... green; queue of all running tasks
+# A ... amber; queue of all runnable tasks
+# R ... red; queue of all blocked tasks
+# Y ... cyan; queue of fetching tasks
+# C ... crimson; queue of tasks that are being stolen
+# B ... blue; queue of all sparks
+#
+# Options:
+# -i <int> ... info level from 1 to 7; number of queues to count (see qp3ps)
+# -I <str> ... count tasks that are in one of the given queues; encoding:
+# 'a' ... active (running)
+# 'r' ... runnable
+# 'b' ... blocked
+# 'f' ... fetching
+# 'm' ... migrating
+# 's' ... sparks
+# (e.g. -I "arb" counts sum of active, runnable, blocked tasks)
+# -c ... check consistency of data (e.g. no neg. number of tasks)
+# -v ... be talkative.
+# -h ... print help message (this header).
+#
+##############################################################################
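+#
+# Example (file names are only an illustration; cf. the call in gr2jv.bash):
+#   cat soda.gr | gr2java -I "arb" >> soda.jv
+# counts the active, runnable and blocked tasks and appends the sorted event
+# stream plus a final summary line to soda.jv.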
+
+require "getopts.pl";
+
+&Getopts('hvDSci:I:');
+
+do process_options();
+
+if ( $opt_v ) {
+ do print_verbose_message();
+}
+
+# ---------------------------------------------------------------------------
+# Init
+# ---------------------------------------------------------------------------
+
+$max = 0;
+$pmax = 0;
+$ptotal = 0;
+$n = 0;
+
+$active = 0;
+$runnable = 0;
+$blocked = 0;
+$fetching = 0;
+$migrating = 0;
+$sparks = 0;
+
+$improved_sort_option = $opt_S ? "-S" : "";
+
+open (FOOL,"| ghc-fool-sort $improved_sort_option | sort -n +0 -1 | ghc-unfool-sort") || die "FOOL";
+
+$in_header = 9;
+while(<>) {
+ if ( $in_header == 9 ) {
+ if (/^=/) {
+ $gum_style_gr = 1;
+ $in_header = 0;
+ } else {
+ $gum_style_gr = 0;
+ $in_header = 1;
+ }
+
+ }
+ if (/^\++$/) {
+ $in_header=0;
+ next;
+ }
+ next if $in_header;
+ next if /^$/;
+ next if /^=/;
+ chop;
+ ($PE, $pe, $time, $act, $tid, $rest) = split;
+ $time =~ s/[\[\]:]//g;
+ # next if $act eq 'REPLY';
+ chop($tid) if $act eq 'END';
+ $from = $queue{$tid};
+ $extra = "";
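+  # Map the event onto a queue transition: e.g. START puts a new task into G,
+  # BLOCK moves it into R, END removes it again (*); see the colour key above.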
+ if ($act eq 'START') {
+ $from = '*';
+ $to = 'G';
+ $n++;
+ if ( $n > $pmax ) { $pmax = $n; }
+ $ptotal++;
+ } elsif ($act eq 'START(Q)') {
+ $from = '*';
+ $to = 'A';
+ $n++;
+ if ( $n > $pmax ) { $pmax = $n; }
+ $ptotal++;
+ } elsif ($act eq 'STEALING') {
+ $to = 'C';
+ } elsif ($act eq 'STOLEN') {
+ $to = 'G';
+ } elsif ($act eq 'STOLEN(Q)') {
+ $to = 'A';
+ } elsif ($act eq 'FETCH') {
+ $to = 'Y';
+ } elsif ($act eq 'REPLY') {
+ $to = 'R';
+ } elsif ($act eq 'BLOCK') {
+ $to = 'R';
+ } elsif ($act eq 'RESUME') {
+ $to = 'G';
+ $extra = " 0 0x0";
+ } elsif ($act eq 'RESUME(Q)') {
+ $to = 'A';
+ $extra = " 0 0x0";
+ } elsif ($act eq 'END') {
+ $to = '*';
+ $n--;
+ if ( $opt_c && $n < 0 ) {
+ print STDERR "Error at time $time: neg. number of tasks: $n\n";
+ }
+ } elsif ($act eq 'SCHEDULE') {
+ $to = 'G';
+ } elsif ($act eq 'DESCHEDULE') {
+ $to = 'A';
+ # The following are only needed for spark profiling
+ } elsif (($act eq 'SPARK') || ($act eq 'SPARKAT')) {
+ $from = '*';
+ $to = 'B';
+ } elsif ($act eq 'USED') {
+ $from = 'B';
+ $to = '*';
+ } elsif ($act eq 'PRUNED') {
+ $from = 'B';
+ $to = '*';
+ } elsif ($act eq 'EXPORTED') {
+ $from = 'B';
+ $to = 'B';
+ } elsif ($act eq 'ACQUIRED') {
+ $from = 'B';
+ $to = 'B';
+ } else {
+ print STDERR "Error at time $time: unknown event $act\n";
+ }
+ $queue{$tid} = $to;
+
+ if ( $from eq '' ) {
+    print STDERR "Error at time $time: process $tid has no from queue\n";
+ }
+ if ($to ne $from) {
+ print FOOL $time, " ", $pe, " ",
+ $from, $to, "\n";
+ }
+
+ if ($to ne $from) {
+ # Compare with main loop in qp3ps
+ if ($from eq '*') {
+ } elsif ($from eq 'G') {
+ --$active;
+ } elsif ($from eq 'A') {
+ --$runnable;
+ } elsif ($from eq 'R') {
+ --$blocked;
+ } elsif ($from eq 'B') {
+ --$sparks;
+ } elsif ($from eq 'C') {
+ --$migrating;
+ } elsif ($from eq 'Y') {
+ --$fetching;
+ } else {
+ print STDERR "Illegal from char: $from at $time\n";
+ }
+
+ if ($to eq '*') {
+ } elsif ($to eq 'G') {
+ ++$active;
+ } elsif ($to eq 'A') {
+ ++$runnable;
+ } elsif ($to eq 'R') {
+ ++$blocked;
+ } elsif ($to eq 'B') {
+ ++$sparks;
+ } elsif ($to eq 'C') {
+ ++$migrating;
+ } elsif ($to eq 'Y') {
+ ++$fetching;
+ } else {
+ print STDERR "Illegal to char: $to at $time\n";
+ }
+
+ }
+
+ $curr = &count();
+ if ( $curr > $max ) {
+ $max = $curr;
+ }
+
+ if ( 0 ) {
+ print STDERR "%% $time: (act,runnable,blocked,fetch,mig,sp) = " .
+ "($active, $runnable, $blocked, $fetching, $migrating, $sparks)".
+ " max = $max\n" ;
+ }
+
+ #print STDERR "Sparks @ $time: $sparks \tCurr: $curr \tMax: $max \n" if $opt_D;
+
+ if ( $time > $tmax ) {
+ $tmax = $time;
+ }
+ delete $queue{$tid} if $to eq '*';
+
+}
+
+print "Time: ", $tmax, " Max_selected_tasks: ", $max,
+ " Max_running_tasks: ", $pmax, " Total_tasks: ", $ptotal, "\n";
+
+close(FOOL);
+
+exit 0;
+
+# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+# Copied from qp3ps and slightly modified (we don't keep a list for each queue
+# but just compute the max value we get out of all calls to count during the
+# execution of the script).
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+
+sub queue_on {
+ local ($queue) = @_;
+
+ return index($show,$queue)+1;
+}
+
+# -----------------------------------------------------------------------------
+
+sub count {
+ local ($res);
+
+ $res = (($queue_on_a) ? $active : 0) +
+ (($queue_on_r) ? $runnable : 0) +
+ (($queue_on_b) ? $blocked : 0) +
+ (($queue_on_f) ? $fetching : 0) +
+ (($queue_on_m) ? $migrating : 0) +
+ (($queue_on_s) ? $sparks : 0);
+
+ return $res;
+}
+
+# -----------------------------------------------------------------------------
+# DaH 'oH lo'lu'Qo'
+# DaH 'oH lo'lu'Qo'   (Klingon, roughly: "this is not used any more")
+
+sub set_values {
+ local ($samples,
+ $active,$runnable,$blocked,$fetching,$sparks,$migrating) = @_;
+
+  $G[$samples] = $queue_on_a ? $active    : 0;
+  $A[$samples] = $queue_on_r ? $runnable  : 0;
+  $R[$samples] = $queue_on_b ? $blocked   : 0;
+  $Y[$samples] = $queue_on_f ? $fetching  : 0;
+  $B[$samples] = $queue_on_s ? $sparks    : 0;
+  $C[$samples] = $queue_on_m ? $migrating : 0;
+}
+
+# -----------------------------------------------------------------------------
+
+sub process_options {
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0): $!\n";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+ exit ;
+ }
+
+ $show = "armfb";
+
+ if ( $opt_i ) {
+    $show = "a"      if $opt_i == 1;
+    $show = "ar"     if $opt_i == 2;
+    $show = "arb"    if $opt_i == 3;
+    $show = "arfb"   if $opt_i == 4;
+    $show = "armfb"  if $opt_i == 5;
+    $show = "armfbs" if $opt_i == 6;
+ }
+
+ if ( $opt_I ) {
+ $show = $opt_I;
+ }
+
+ if ( $opt_v ){
+ $verbose = 1;
+ }
+
+ $queue_on_a = &queue_on("a");
+ $queue_on_r = &queue_on("r");
+ $queue_on_b = &queue_on("b");
+ $queue_on_f = &queue_on("f");
+ $queue_on_s = &queue_on("s");
+ $queue_on_m = &queue_on("m");
+}
+
+sub print_verbose_message {
+
+ print STDERR "Info-str: $show\n";
+ print STDERR "The following queues are turned on: " .
+ ( $queue_on_a ? "active, " : "") .
+ ( $queue_on_r ? "runnable, " : "") .
+ ( $queue_on_b ? "blocked, " : "") .
+ ( $queue_on_f ? "fetching, " : "") .
+ ( $queue_on_m ? "migrating, " : "") .
+ ( $queue_on_s ? "sparks" : "") .
+ "\n";
+}
diff --git a/ghc/utils/parallel/gr2jv.bash b/ghc/utils/parallel/gr2jv.bash
new file mode 100644
index 0000000000..7eeacfe556
--- /dev/null
+++ b/ghc/utils/parallel/gr2jv.bash
@@ -0,0 +1,123 @@
+#!/usr/local/bin/bash
+##############################################################################
+# Time-stamp: <Wed Jul 24 1996 20:38:02 Stardate: [-31]7859.09 hwloidl>
+#
+# Usage: gr2jv [options] <gr-file>
+#
+# Create a per-thread activity graph from a GrAnSim (or GUM) profile.
+# Transform the log file of a GrAnSim run (a .gr file) into a quasi-parallel
+# profile (a .jv file) using gr2java and then into a PostScript file using qp2ap.
+# The generated PostScript file shows one horizontal line for each task. The
+# thickness of the line indicates the state of the thread:
+# thick ... active, medium ... suspended, thin ... fetching remote data
+#
+# Options:
+# -o <file> ... write .ps file to <file>
+#  -m      ... create mono PostScript file instead of a color one.
+# -O ... optimise i.e. try to minimise the size of the .ps file.
+# -v ... be talkative.
+# -h ... print help message (this header).
+#
+##############################################################################
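+#
+# Example (the profile name is only an illustration):
+#   gr2jv soda.gr
+# writes the gr2java output for soda.gr to soda.jv; the PostScript stage
+# (qp2ap) is still commented out at the end of this script.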
+
+progname="`basename $0`"
+args="$*"
+
+verb=0
+help=0
+mono=""
+apfile=""
+optimise=""
+scale=""
+width=""
+
+getopts "hvmo:s:w:OD" name
+while [ "$name" != "?" ] ; do
+ case $name in
+ h) help=1;;
+ v) verb=1;;
+ m) mono="-m";;
+ o) apfile="$OPTARG";;
+ s) scale="-s $OPTARG";;
+ w) width="-w $OPTARG";;
+ O) optimise="-O";;
+ D) debug="-D";;
+ esac
+ getopts "hvmo:s:w:OD" name
+done
+
+opts="$mono $optimise $scale $width"
+
+shift $[ $OPTIND - 1 ]
+
+if [ $help -eq 1 ]
+ then no_of_lines=`cat $0 | awk 'BEGIN { n = 0; } \
+ /^$/ { print n; \
+ exit; } \
+ { n++; }'`
+ echo "`head -$no_of_lines $0`"
+ exit
+fi
+
+
+if [ -z "$1" ]
+ then echo "Usage: $progname [options] file[.gr]"
+ echo "Use -h option for details"
+ exit 1;
+fi
+
+f="`basename $1 .gr`"
+grfile="$f".gr
+qpfile="$f".qp
+ppfile="$f".pp
+jvfile="$f".jv
+
+if [ -z "$apfile" ]
+ then apfile="$f"-ap.ps
+fi
+
+if [ $verb -eq 1 ]
+ then echo "Input file: $grfile"
+ echo "Quasi-parallel file: $qpfile"
+ echo "PostScript file: $apfile"
+ echo "Options forwarded to qp3ap: $opts"
+ if [ "$mono" = "-m" ]
+ then echo "Producing monochrome PS file"
+ else echo "Producing color PS file"
+ fi
+ if [ "$debug" = "-D" ]
+ then echo "Debugging is turned ON"
+ else echo "Debugging is turned OFF"
+ fi
+fi
+
+
+# unset noclobber
+
+if [ ! -f "$grfile" ]
+ then
+ echo "$grfile does not exist"
+ exit 1
+ else
+ # rm -f "$qpfile" "$apfile"
+ prog=`head -1 "$grfile" | sed -e 's/Granularity Simulation for //'`
+ echo "$prog" >| "$jvfile"
+ if [ $verb -eq 1 ]
+ then echo "Executed program: $prog"
+ fi
+ date >> "$jvfile"
+ #date="`date`" # This is the date of running the script
+ date="`tail +2 $grfile | head -1 | sed -e 's/Start-Time: //'`"
+ cat "$grfile" | gr2java >> "$jvfile"
+ # Sorting is part of gr2qp now.
+ # | ghc-fool-sort | sort -n +0 -1 | ghc-unfool-sort >> "$qpfile"
+ # max=`tail -2 "$qpfile" | awk '!/^Number of threads:/ { print $1; }'`
+ xmax=`tail -1 "$jvfile" | awk '{ print $2; }'`
+ ymax=`tail -1 "$jvfile" | awk '{ print $8; }'`
+ if [ $verb -eq 1 ]
+ then echo "Total runtime: $xmax"
+ echo "Total number of tasks: $ymax"
+ fi
+ # Old: qp2ap.pl $mono $max "$prog" "$date" < "$qpfile" > "$apfile"
+fi
+
diff --git a/ghc/utils/parallel/gr2pe.pl b/ghc/utils/parallel/gr2pe.pl
new file mode 100644
index 0000000000..6026300758
--- /dev/null
+++ b/ghc/utils/parallel/gr2pe.pl
@@ -0,0 +1,1434 @@
+#!/usr/local/bin/perl
+# (C) Hans Wolfgang Loidl, November 1994
+# ############################################################################
+# Time-stamp: <Fri Jun 14 1996 20:21:17 Stardate: [-31]7659.03 hwloidl>
+#
+# Usage: gr2pe [options] <gr-file>
+#
+# Create per processor activity profile (as ps-file) from a given gr-file.
+#
+# Options:
+# -o <file> ... output file (ps file) has name <file>
+# -m ... produce monochrome output
+# -M ... produce a migration graph
+# -S ... produce a spark graph in a separate file (based on the no. of
+# sparks rather than the no. of runnable threads)
+# -t ... produce trace of runnable, blocked, fetching threads
+# -i <n> ... ``infinity'' for number of blocked tasks (default: 20)
+# all values larger than that are shown with the same width
+# -C ... do consistency check at each event (mainly for debugging)
+# -h ... print help message (this text)
+# -v ... be talkative
+#
+# ############################################################################
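+#
+# Example (the profile name is only an illustration):
+#   gr2pe -m -M -o soda_pe.ps soda.gr
+# produces a monochrome per-processor activity profile in soda_pe.ps and,
+# because of -M, a separate migration graph.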
+
+# die "This script is still under development -- HWL\n";
+
+# ----------------------------------------------------------------------------
+# Command line processing and initialization
+# ----------------------------------------------------------------------------
+
+require "getopts.pl";
+
+&Getopts('hvDCMNmSGti:o:l:p:');
+
+do process_options();
+
+if ( $opt_v ) {
+ do print_verbose_message();
+}
+
+# ----------------------------------------------------------------------------
+# Global Variables
+# ----------------------------------------------------------------------------
+
+$RUNNING = "RUNNING";
+$RUNNABLE = "RUNNABLE";
+$BLOCKED = "BLOCKED";
+$START = "START";
+$END = "END";
+
+# Modes for hline
+#$LITERATE = 1;
+#$NORMALIZING = 2;
+
+%GRAY = (
+ $RUNNING, 0.6,
+ $RUNNABLE, 0.3,
+ $BLOCKED, 0,
+ $START, 0,
+ $END, 0.5);
+
+# Special value showing that no task is running on $pe if in $running[$pe]
+$NO_ID = -1;
+$NO_LAST_BG = $NO_LAST_BLOCKED = $NO_LAST_START = -1;
+
+# The number of PEs we have
+$nPEs = 32;
+
+# Unit (in pts) of the width for BLOCKED and RUNNABLE line segments
+$width_unit = 1;
+
+# Width of line for RUNNING
+$running_width = 1;
+
+# Offset of BLOCKED and RUNNABLE lines from the center line
+$offset = 10;
+
+# Left and right border of the picture; Width of the picture
+$left_border = 0;
+$right_border = 700;
+$total_width = $right_border - $left_border;
+$x_scale = 1;
+
+# Height of the picture measured from y-val of first to y-val of last PE
+$lower_border = 10;
+$upper_border = 490;
+$total_height = $upper_border - $lower_border;
+$y_scale = 1;
+
+# Constant from where shrinking of x-values (+scaling as usual) is enabled
+$very_big = 1E8;
+
+# Factor by which the x values are shrunk (if very big)
+$shrink_x = 10000;
+
+# Set format of output of numbers
+$# = "%.2g";
+
+# Width of stripes in migration graph
+$tic_width = 2;
+
+# If no spark profile should be generate we count the number of spark events
+# in the profile to inform the user about existing spark information
+if ( !$opt_S ) {
+ $spark_events = 0;
+}
+
+# ----------------------------------------------------------------------------
+# The real thing starts here
+# ----------------------------------------------------------------------------
+
+open (IN,"<$input") || die "$input: $!\n";
+open (OUT,">$output") || die "$output: $!\n";
+open (OUT_MIG,">$output_mig") || die "$output_mig: $!\n" if $opt_M;
+open (OUT_SP,">$output_sp") || die "$output_sp: $!\n" if $opt_S;
+# open (OUT_B,">$output_b") || die "$output_b: $!\n";
+# open (OUT_R,">$output_r") || die "$output_r: $!\n";
+
+open(OUT_RA, ">$RUNNABLE_file") || die "$RUNNABLE_file: $!\n" if $opt_t;
+print OUT_RA "# Number of Runnable tasks on all PEs $i\n" if $opt_t;
+open(OUT_BA, ">$BLOCKED_file") || die "$BLOCKED_file: $!\n" if $opt_t;
+print OUT_BA "# Number of Blocked tasks on all PEs $i\n" if $opt_t;
+open(OUT_FA, ">$FETCHING_file") || die "$FETCHING_file: $!\n" if $opt_t;
+print OUT_FA "# Number of Fetching tasks on all PEs $i\n" if $opt_t;
+
+($pname,$pars,$nPEs,$lat) = &skip_header(IN);
+
+
+# Fill in the y_val table for all PEs
+$offset = (&generate_y_val_table($nPEs)/2);
+
+$x_min = 0;
+$x_max = &get_x_max($input);
+$y_max = $total_height;
+#$y_max = $y_val[$nPEs-1] + offset;
+
+$is_very_big = $x_max > $very_big;
+
+# Max width allowed when drawing lines for BLOCKED, RUNNABLE tasks
+$max_width = $offset;
+
+# General init
+do init($nPEs);
+
+do write_prolog(OUT,$x_max,$y_max);
+do write_prolog(OUT_MIG,$x_max,$y_max) if $opt_M;
+do write_prolog(OUT_SP,$x_max,$y_max) if $opt_S;
+# do write_prolog(OUT_B,$x_max,$y_max);
+# do write_prolog(OUT_R,$x_max,$y_max);
+
+while (<IN>) {
+ next if /^$/; # Omit empty lines;
+ next if /^--/; # Omit comment lines;
+
+ ($event, $time, $id, $pe) = &get_line($_);
+ $x_max_ = $time if $time > $x_max_;
+
+ print OUT_RA "TIME: $time PEs: " . join(", ",@runnable) .
+ " SUM: " . &list_sum(@runnable) . "\n" if $opt_t;
+ print OUT_BA "TIME: $time PEs: " . join(", ",@blocked) .
+ " SUM: " . &list_sum(@blocked) . "\n" if $opt_t;
+ print OUT_FA "TIME: $time PEs: " . join(", ",@fetching) .
+ " SUM: " . &list_sum(@fetching) . "\n" if $opt_t;
+
+ foo : {
+ ($event eq "START") && do {
+ # do draw_tic($pe, $time, $START);
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ $running[$pe] = $id;
+ # $where{$id} = $pe + 1;
+ last foo;
+ };
+ ($event eq "START(Q)") && do {
+ #do draw_segment($pe, $time, $RUNNABLE);
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ #$last_runnable[$pe] = $time;
+ $runnable[$pe]++;
+ # $where{$id} = $pe + 1;
+ last foo;
+ };
+ ($event eq "STEALING") && do {
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ $runnable[$pe]--;
+ $where{$id} = $pe + 1;
+ if ( $opt_M ) {
+ $when{$id} = $time;
+ do draw_tic($pe, $time, $event);
+ }
+ last foo;
+ };
+ ($event eq "STOLEN") && do {
+ # do draw_tic($pe, $time, $START);
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ $running[$pe] = $id;
+ if ( $where{$id} ) {
+ # Ok
+ } else {
+ $warn++;
+ print "WARNING: No previous location for STOLEN task $id found!" .
+ " Check the gr file!\n";
+ }
+ if ( $opt_M ) {
+ do draw_tic($pe, $time, $event);
+ do draw_arrow($where{$id}-1,$pe,$when{$id},$time);
+ }
+ last foo;
+ };
+ ($event eq "STOLEN(Q)") && do {
+ #do draw_segment($pe, $time, $RUNNABLE);
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ #$last_runnable[$pe] = $time;
+ $runnable[$pe]++;
+ if ( $where{$id} ) {
+ # Ok
+ } else {
+ $warn++;
+ print "WARNING: No previous location for STOLEN(Q) task $id found!" .
+ " Check the gr file!\n";
+ }
+ if ( $opt_M ) {
+ do draw_tic($pe, $time, $event);
+ do draw_arrow($where{$id}-1,$pe,$when{$id},$time);
+ }
+ last foo;
+ };
+ ($event eq "BLOCK") && do {
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ do draw_segment($pe, $time, $BLOCKED) unless $blocked[$pe] == 0 ;
+ $last_blocked[$pe] = $time;
+ #do draw_segment($pe, $time, $RUNNING);
+ $blocked[$pe]++;
+ $running[$pe] = $NO_ID;
+ last foo;
+ };
+ ($event eq "RESUME") && do {
+ # do draw_tic($pe, $time, $START);
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ do draw_segment($pe, $time, $BLOCKED);
+ $last_blocked[$pe] = $time;
+ $blocked[$pe]--;
+ $running[$pe] = $id;
+ last foo;
+ };
+ ($event eq "RESUME(Q)") && do {
+ #do draw_segment($pe, $time, $RUNNABLE);
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ do draw_segment($pe, $time, $BLOCKED);
+ $last_blocked[$pe] = $time;
+ #$last_runnable[$pe] = $time;
+ $blocked[$pe]--;
+ $runnable[$pe]++;
+ last foo;
+ };
+ ($event eq "END") && do {
+ # do draw_tic($pe, $time, $END);
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ $running[$pe] = $NO_ID;
+ # do draw_segment($pe, $time, $RUNNING);
+ # $last_blocked[$pe] = $time;
+ last foo;
+ };
+ ($event eq "SCHEDULE") && do {
+ # do draw_tic($pe, $time);
+ $last_start[$pe] = $time;
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ $runnable[$pe]--;
+ $running[$pe] = $id;
+ last foo;
+ };
+ # NB: Check these; they are not yet tested
+ ($event eq "FETCH") && do {
+ # Similar to BLOCK; but don't draw a block segment
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ #do draw_segment($pe, $time, $BLOCKED) unless $blocked[$pe] == 0 ;
+ #$last_blocked[$pe] = $time;
+ #$blocked[$pe]++;
+ $fetching[$pe]++;
+ $running[$pe] = $NO_ID;
+ last foo;
+ };
+ ($event eq "REPLY") && do {
+ do draw_bg($pe, $time);
+ $last_bg[$pe] = $time;
+ #do draw_segment($pe, $time, $BLOCKED);
+ #$last_blocked[$pe] = $time;
+ #$blocked[$pe]--;
+ $fetching[$pe]--;
+ $blocked[$pe]++;
+ last foo;
+ };
+    # These are only processed if a spark profile is generated, too
+ (($event eq "SPARK") || ($event eq "SPARKAT") || ($event eq "ACQUIRED")) && do {
+      if ( !$opt_S ) {
+ $spark_events++;
+ last foo;
+ }
+ do draw_sp_bg($pe, $time);
+ $last_sp_bg[$pe] = $time;
+ $sparks[$pe]++;
+ last foo;
+ };
+
+ (($event eq "USED") || ($event eq "PRUNED") || ($event eq "EXPORTED")) && do {
+      if ( !$opt_S ) {
+ $spark_events++;
+ last foo;
+ }
+ do draw_sp_bg($pe, $time);
+ $last_sp_bg[$pe] = $time;
+ $sparks[$pe]--;
+ if ( $sparks[$pe]<0 ) {
+ print STDERR "Error: Neg. number of sparks @ $time\n";
+ }
+ last foo;
+ };
+
+ $warn++;
+ print "WARNING: Unknown event: $event\n";
+ }
+ do check_consistency() if $opt_M;
+}
+
+do write_epilog(OUT,$x_max,$y_max);
+do write_epilog(OUT_MIG,$x_max,$y_max) if $opt_M;
+do write_epilog(OUT_SP,$x_max,$y_max) if $opt_S;
+# do write_epilog(OUT_B,$x_max,$y_max);
+# do write_epilog(OUT_R,$x_max,$y_max);
+
+close(IN);
+close(OUT);
+# close(OUT_B);
+# close(OUT_R);
+
+close(OUT_MIG) if $opt_M;
+close(OUT_SP) if $opt_S;
+close(OUT_BA) if $opt_t;
+close(OUT_RA) if $opt_t;
+close(OUT_FA) if $opt_t;
+
+#for ($i=0; $i<$nPEs; $i++) {
+# close($OUT_BA[$i]);
+# close($OUT_RA[$i]);
+#}
+
+if ($x_max != $x_max_ ) {
+ print STDERR "WARNING: Max time ($x_max_) is different from time of last event ($x_max)\n";
+}
+
+print "Number of suppressed warnings: $warn\n" if $warn>0;
+print "FYI: The file $input contains $spark_events lines of spark information\n" if !$opt_S && ($spark_events>0);
+
+system "gzip -f1 $RUNNABLE_file" if $opt_t;
+system "gzip -f1 $BLOCKED_file" if $opt_t;
+system "gzip -f1 $FETCHING_file" if $opt_t;
+
+system "fortune -s" if $opt_v;
+
+exit 0;
+
+# ----------------------------------------------------------------------------
+# This translation is mainly taken from gr2qp.awk
+# This subroutine returns the event found on the current line together with
+# the relevant information for that event. The possible EVENTS are:
+# START, STARTQ, STOLEN, BLOCK, RESUME, RESUMEQ, END, SCHEDULE
+# ----------------------------------------------------------------------------
+
+sub get_line {
+ local ($line) = @_;
+ local ($f, @fs);
+ local ($event, $time, $id, $pe);
+
+ @fs = split(/[:\[\]\s]+/,$line);
+ $event = $fs[3];
+ $time = $fs[2];
+ $id = $fs[4];
+ $pe = $fs[1];
+
+ print OUT "% > " . $_ if $opt_D;
+ print OUT "% EVENT = $event; TIME = $time; ID = $id; PE = $pe\n" if $opt_D;
+ print OUT "% --> this task comes from PE " . ($where{$id}-1) . "\n" if $opt_D && $event eq "STOLEN";
+
+ return ($event, $time, $id, $pe);
+
+ # if ($fs[3] eq "START") {
+ # partprofile = 0;
+ # print (substr($3,2,length($3)-3))," *G 0 0x" $5;
+ # }
+ # if ($fs[3] eq "START(Q)") {
+ # print (substr($3,2,length($3)-3))," *A 0 0x" $5;
+ # }
+
+ # if ($fs[3] eq "STOLEN") {
+ # print (substr($3,2,length($3)-3))," AG 0 0x" $5;
+ # }
+
+ # if ($fs[3] eq "BLOCK") {
+ # print (substr($3,2,length($3)-3))," GR 0 0x" $5;
+ # }
+ # if ($fs[3] eq "RESUME") {
+ # print (substr($3,2,length($3)-3))," RG 0 0x" $5, "0 0x0";
+ # }
+ # if ($fs[3] eq "RESUME(Q)") {
+ # print (substr($3,2,length($3)-3))," RA 0 0x" $5, "0 0x0";
+ # }
+ # if ($fs[3] eq "END") {
+ # if (partprofile) {
+ # p rint (substr($9,1,length($9)-1))," *G 0 0x" (substr($5,1,length($5)-1));
+ # p rint (substr($3,2,length($3)-3))," G* 0 0x" (substr($5,1,length($5)-1));
+ # } else {
+ # print (substr($3,2,length($3)-3))," G* 0 0x" (substr($5,1,length($5)-1));
+ # }
+ # }
+ # if ($fs[3] eq "SCHEDULE") {
+ # print (substr($3,2,length($3)-3))," AG 0 0x" $5;
+ # }
+
+}
+
+# ----------------------------------------------------------------------------
+
+sub check_consistency {
+ local ($i);
+
+ for ($i=0; $i<$nPEs; $i++) {
+ if ( $runnable[$i] < 0 ) {
+ print "INCONSISTENCY: PE $i: Size of runnable queue: $runnable[$i] at time $time\n";
+ $runnable[$i] = 0 ;
+ }
+ if ( $blocked[$i] < 0 ) {
+ print "INCONSISTENCY: PE $i: Size of blocked queue: $blocked[$i] at time $time\n";
+ $blocked[$i] = 0 ;
+ }
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub get_width {
+ local ($n, $type) = @_;
+
+ $warn++ if $n <0;
+ print "WARNING: Neg. number of tasks in $type queue: $n!!\n" if $n <0;
+ $n = 0 if $n <0;
+ return ( ($type eq $RUNNING) ? ($running_width * $width_unit) :
+ &min($max_width, $n * $width_unit) );
+}
+
+# ----------------------------------------------------------------------------
+# Use an intensity between 0 (empty runnable queue) and 1 (`full' runnable
+# queue) to abstract from monochrome/color values
+# The concrete grayshade/color is computed via PS macros.
+# ----------------------------------------------------------------------------
+
+sub get_intensity {
+ local ($n) = @_;
+
+ print "SEVERE WARNING: get_intensity: Negative size of runnable queue\n" if $n<0;
+
+ if ($n >= $inf_block) {
+ return 1.0;
+ } else {
+ return ($n+1)/$inf_block;
+ }
+}
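+
+# Assuming $inf_block holds the -i setting (default 20): a runnable queue of
+# length 4 maps to intensity (4+1)/20 = 0.25, and any length >= 20 saturates
+# at 1.0.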
+
+# ----------------------------------------------------------------------------
+
+sub get_sp_intensity {
+ local ($n) = @_;
+
+ print "SEVERE WARNING: get_sp_intensity: Negative size of sparks queue\n" if $n<0;
+
+ if ($n >= $inf_block) {
+ return 1.0;
+ } else {
+ return ($n+1)/$inf_block;
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub get_shade {
+ local ($n) = @_;
+
+
+ if ($n > $inf_block) {
+ return 0.2;
+ } else {
+ return 0.8 - ($n/$inf_block);
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub max {
+ local($x, $y) = @_;
+
+ return ($x>$y ? $x : $y);
+}
+
+# ----------------------------------------------------------------------------
+
+sub min {
+ local($x, $y) = @_;
+
+ return ($x<$y ? $x : $y);
+}
+
+# ----------------------------------------------------------------------------
+
+sub list_sum {
+ local (@list) = @_;
+
+ local ($sum);
+
+ foreach $x (@list) {
+ $sum += $x;
+ }
+
+ return ($sum);
+}
+
+# ----------------------------------------------------------------------------
+# Drawing functions.
+# Put on top of functions that directly generate PostScript.
+# ----------------------------------------------------------------------------
+
+sub draw_segment {
+ local ($pe, $time, $type) = @_;
+ local ($x, $y, $width, $gray);
+
+ if ( $type eq $BLOCKED ) {
+ if ( $last_blocked[$pe] == $NO_LAST_BLOCKED ) { return; };
+ $width = &get_width($blocked[$pe], $type);
+ if ( $width == 0 ) { return; };
+ $y = $stripes_low[$pe] + int($width/2 + 0.5);
+ $x = $last_blocked[$pe];
+
+ if ( $is_very_big ) {
+ $x = int($x/$shrink_x) + 1; # rounded up
+ }
+
+ # $gray = 0.5; # Ignoring gray level; doesn't change!
+ do ps_draw_hline(OUT,$x,$y,$time,$width);
+ } else {
+    die "ERROR: Unknown type of line: $type in draw_segment\n";
+ }
+
+ if ($x < 0 || $y<0) {
+    die "Impossible arguments for ps_draw_hline: ($x,$y); type=$type\n";
+ }
+ if ($width<0 || $width>$max_width || $gray <0 || $gray > 1) {
+ die "Impossible arguments to ps_draw_hline: width=$width; gray=$gray\n";
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub draw_tic {
+ local ($pe, $time, $event) = @_;
+ local ($x, $y, $lit);
+
+ $ystart = $stripes_low[$pe];
+ $yend = $stripes_high[$pe];
+ $x = $time;
+ if ( $event eq "STEALING" ) {
+ $lit = 0; # i.e. FROM
+ } elsif ( ( $event eq "STOLEN") || ( $event eq "STOLEN(Q)" ) ) {
+ $lit = 1; # i.e. TO
+ } else {
+ die "ERROR: Wrong event $event in draw_tic\n";
+ }
+
+ if ( $is_very_big ) {
+ $x = int($x/$shrink_x) + 1; # rounded up
+ }
+
+ if ($x < 0 || $ystart<0 || $yend<0) {
+    die "Impossible arguments for ps_draw_tic: ($x,$ystart,$yend); PE=$pe\n";
+ }
+ do ps_draw_tic(OUT_MIG,$x,$ystart,$yend,$lit);
+}
+
+# ----------------------------------------------------------------------------
+
+sub draw_bg {
+ local ($pe,$time) = @_;
+ local ($x_start, $x_end, $intensity, $secondary_intensity);
+
+ if ( $last_bg[$pe] == $NO_LAST_BG ) {
+ print OUT "% Omitting BG: NO LAST BG\n" if $opt_D;
+ return;
+ }
+ if ( $running[$pe] == $NO_ID ) {
+ print OUT "% BG: NO RUNNING PE -> idle bg\n" if $opt_D;
+ # return;
+ }
+ $x_start = $last_bg[$pe];
+ $x_end = $time;
+ $intensity = ( $running[$pe] == $NO_ID ?
+ 0 :
+ &get_intensity($runnable[$pe]) );
+ $secondary_intensity = ( $running[$pe] == $NO_ID ?
+ 0 :
+ &get_intensity($fetching[$pe]) );
+ do ps_draw_bg(OUT,$x_start, $x_end, $stripes_low[$pe], $stripes_high[$pe],
+ $intensity,$secondary_intensity);
+
+ if ( $opt_M ) {
+ do ps_draw_hline(OUT_MIG, $x_start, $stripes_low[$pe], $x_end,
+ $mig_width);
+ }
+
+}
+
+# ----------------------------------------------------------------------------
+# Variant of draw_bg; used for spark profile
+# ----------------------------------------------------------------------------
+
+sub draw_sp_bg {
+ local ($pe,$time) = @_;
+ local ($x_start, $x_end, $intensity, $secondary_intensity);
+
+ if ( $last_sp_bg[$pe] == $NO_LAST_BG ) {
+ print OUT_SP "% Omitting BG: NO LAST BG\n" if $opt_D;
+ return;
+ }
+ $x_start = $last_sp_bg[$pe];
+ $x_end = $time;
+ $intensity = ( $sparks[$pe] <= 0 ?
+ 0 :
+ &get_sp_intensity($sparks[$pe]) );
+ $secondary_intensity = 0;
+ do ps_draw_bg(OUT_SP,$x_start, $x_end, $stripes_low[$pe], $stripes_high[$pe],
+ $intensity,$secondary_intensity);
+
+}
+
+# ----------------------------------------------------------------------------
+
+sub draw_arrow {
+ local ($from_pe,$to_pe,$send_time,$arrive_time) = @_;
+ local ($ystart,$yend);
+
+ $ystart = $stripes_high[$from_pe];
+ $yend = $stripes_low[$to_pe];
+ do ps_draw_arrow(OUT_MIG,$send_time,$arrive_time,$ystart,$yend);
+}
+
+# ----------------------------------------------------------------------------
+# Normalize the x value s.t. it fits onto the page without scaling.
+# The global values $left_border and $right_border and $total_width
+# determine the borders
+# of the graph.
+# This fct is only called from within ps_... fcts. Before that the $x values
+# are always times.
+# ----------------------------------------------------------------------------
+
+sub normalize {
+ local ($x) = @_;
+
+  return (($x-$x_min)/($x_max-$x_min) * $total_width + $left_border);
+}
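+
+# Worked example (the numbers are only an illustration): with $x_min = 0,
+# $x_max = 1000000, $total_width = 700 and $left_border = 0, a time of 500000
+# is mapped to x = 350.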
+
+# ----------------------------------------------------------------------------
+# PostScript generation functions.
+# Lowest level of writing output file.
+# Now there is only normalizing mode supported.
+# The following is out of date:
+# $mode can be $LITERATE i.e. assuming scaling has been done
+# or $NORMALIZING i.e. no scaling has been done so far (do it in
+# macros for drawing)
+# ----------------------------------------------------------------------------
+
+sub ps_draw_hline {
+ local ($OUT,$xstart,$y,$xend,$width) = @_;
+ local ($xlen);
+
+ print $OUT "% HLINE From: ($xstart,$y) to ($xend,$y) (i.e. len=$xlen) with width $width gray $gray\n" if $opt_D;
+
+ if ( ! $opt_N ) {
+ $xstart = &normalize($xstart);
+ $xend = &normalize($xend);
+ }
+
+ $xlen = $xend - $xstart;
+
+ printf $OUT ("%d %d %d %d L\n",$xstart,$y,$xlen,$width);
+ # ( $mode == $LITERATE ? " L\n" : " N\n");
+
+ # Old version:
+ # print $OUT "newpath\n";
+ # print $OUT "$GRAY{$type} setgray\n";
+ # print $OUT $xend . " " . $y . " " . $xstart . " " . $y . " " . $width .
+ # " line\n";
+ # print $OUT "stroke\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub ps_draw_vline {
+ local ($OUT,$x,$ystart,$yend,$width) = @_;
+
+ print $OUT "% VLINE From: ($x,$ystart) to ($x,$yend) with width $width\n" if $opt_D;
+
+ if ( ! $opt_N ) {
+ $x = &normalize($x);
+ }
+
+ print $OUT "newpath\n";
+ print $OUT "0 setgray\n"; # constant gray level
+ printf $OUT ("%d %d %d %d %.1g line\n",
+ $x,$yend ,$x,$ystart,$width);
+ print $OUT "stroke\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub ps_draw_tic {
+ local ($OUT,$x,$ystart,$yend,$lit) = @_;
+
+ print $OUT "% TIC at ($x,$ystart-$yend)\n" if $opt_D;
+
+ if ( ! $opt_N ) {
+ $x = &normalize($x);
+ }
+
+ printf $OUT ("%d %d %d %d T\n",$x,$ystart,$yend,$lit);
+
+ # Old version without PostScript macro /tic:
+ # print $OUT "newpath\n";
+ # print $OUT "ticwidth setlinewidth\n" .
+ # $x . " " . $y . " ticlen sub moveto\n" .
+ # $x . " " . $y . " ticlen add lineto\n";
+ #print $OUT "stroke\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub ps_draw_arrow {
+ local ($OUT,$xstart,$xend,$ystart,$yend) = @_;
+
+ print $OUT "% ARROW from ($xstart,$ystart) to ($xend,$yend)\n" if $opt_D;
+
+ if ( ! $opt_N ) {
+ $xstart = &normalize($xstart);
+ $xend = &normalize($xend);
+ }
+
+ printf $OUT ("%d %d %d %d A\n",$xstart,$ystart,$xend,$yend);
+}
+
+# ----------------------------------------------------------------------------
+
+sub ps_draw_bg {
+ local ($OUT,$xstart, $xend, $ystart, $yend,
+ $intensity, $secondary_intensity) = @_;
+ local ($xlen, $ylen);
+
+ print $OUT "% Drawing bg for PE $pe from $xstart to $xend" .
+ " (intensity: $intensity, $secondary_intensity)\n" if $opt_D;
+
+ if ( ! $opt_N ) {
+ $xstart = &normalize($xstart);
+ $xend = &normalize($xend);
+ }
+
+ $xlen = $xend - $xstart;
+ $ylen = $yend - $ystart;
+
+ printf $OUT ("%d %d %d %d %.2g %.2g R\n",
+ $xstart,$ystart,$xlen,$ylen,$intensity,$secondary_intensity);
+
+ # Old version without PostScript macro /rect:
+ #print $OUT "newpath\n";
+ #print $OUT " $x_start $y_start moveto\n";
+ #print $OUT " $x_end $y_start lineto\n";
+ #print $OUT " $x_end $y_end lineto\n";
+ #print $OUT " $x_start $y_end lineto\n";
+ #print $OUT "closepath\n";
+ #print $OUT "$gray setgray\n";
+ #print $OUT "fill\n";
+}
+
+# ----------------------------------------------------------------------------
+# Initialization and such
+# ----------------------------------------------------------------------------
+
+sub write_prolog {
+ local ($OUT, $x_max, $y_max) = @_;
+ local ($date, $dist, $y, $i);
+
+ $date = &get_date();
+
+ if ( $opt_N ) {
+ $x_scale = $total_width/$x_max;
+ $y_scale = $total_height/$y_max;
+ }
+
+ # $tic_width = 2 * $x_max/$total_width; constant now
+ # $tic_len = 4 * $y_max/$total_height;
+
+ print $OUT "%!PS-Adobe-2.0\n";
+ print $OUT "%%BoundingBox: \t0 0 560 800\n";
+ print $OUT "%%Title: \t$pname $pars\n";
+ print $OUT "%%Creator: \tgr2pe\n";
+ print $OUT "%%CreationDate: \t$date\n";
+ # print $OUT "%%Orientation: \tSeascape\n";
+ print $OUT "%%EndComments\n";
+
+ # print $OUT "%%BeginSetup\n";
+ # print $OUT "%%PageOrientation: \tSeascape\n";
+ # print $OUT "%%EndSetup\n";
+
+ print $OUT "%/runlineto {1.5 setlinewidth lineto} def\n";
+ print $OUT "%/suspendlineto {0.5 setlinewidth lineto} def\n";
+ print $OUT "%/run { newpath moveto 1.5 setlinewidth lineto stroke} def\n";
+ print $OUT "%/suspend { newpath moveto 0.5 setlinewidth lineto stroke} def\n";
+ print $OUT "\n";
+ print $OUT "/total-len $x_max def\n";
+ print $OUT "/show-len $total_width def\n";
+ print $OUT "/normalize { show-len mul total-len div } def\n";
+ print $OUT "/x-normalize { exch show-len mul total-len div exch } def\n";
+ print $OUT "/str-len 12 def\n";
+ #print $OUT "/prt-n { str-len string cvs show } def" .
+ # " % print top-of-stack integer\n";
+ print $OUT "/prt-n { cvi str-len string cvs \n" .
+ " dup stringwidth pop \n" .
+ " currentpoint pop 780 gt { 10 sub } { 2 div } ifelse \n" .
+ " neg 0 rmoveto \n" .
+ " show } def \n" .
+ " % print top-of-stack integer centered at the current point\n";
+ print $OUT "/ticwidth $tic_width def\n";
+ print $OUT "%/ticlen $tic_len def % actually half of the tic-length\n";
+ print $OUT "/T % Draw a tic mark\n" .
+ " { % Operands: x, y-start, y-end of tic, from/to flag \n" .
+ " newpath\n" .
+ " 0 eq { " . ( $opt_m ? " 0.2 setgray }"
+ : " 0 0.7 0.2 setrgbcolor }" ) .
+ " { " . ( $opt_m ? " 0.8 setgray }"
+ : " 0.7 0 0.2 setrgbcolor }" ) . " ifelse\n" .
+ " ticwidth setlinewidth\n" .
+ " 3 copy pop moveto\n" .
+ " exch pop lineto\n" .
+ " stroke\n" .
+ " } def\n";
+ # " 3 copy pop x-normalize moveto\n" .
+ # " exch pop x-normalize lineto\n" .
+ # " stroke\n" .
+ # " } def\n";
+ print $OUT "/blocked-gray 0 def\n";
+ print $OUT "/idle-gray 1 def\n";
+ print $OUT "/blocked-color { 0.2 0.1 0.8 } def\n";
+ print $OUT "/idle-color { 0.8 0.1 0.2 } def\n";
+ print $OUT "/idle-color-fetch { 0.5 0.6 0.4 } def\n";
+ print $OUT "/L % Draw a line (for blocked tasks)\n" .
+ " { % Operands: (x,y)-start xlen width\n" .
+ " newpath \n" .
+ ( $opt_m ? " blocked-gray setgray\n" :
+ " blocked-color setrgbcolor\n") .
+ " setlinewidth 3 copy pop moveto 0 rlineto pop pop stroke} def\n";
+ print $OUT "/N % Draw a normalized line\n" .
+ " { % Operands: (x,y)-start xlen width\n" .
+ " newpath \n" .
+ ( $opt_m ? " blocked-gray setgray\n" :
+ " blocked-color setrgbcolor\n") .
+ " setlinewidth 3 copy pop x-normalize moveto normalize 0 rlineto pop pop stroke} def\n";
+ print $OUT "% /L line def\n";
+ print $OUT "/printText { 0 0 moveto (GrAnSim) show } def\n";
+ if ( $opt_m ) {
+ print $OUT "/logo { gsave \n" .
+ " translate \n" .
+ " .95 -.05 0 " .
+ " { setgray printText 1 -.5 translate } for \n" .
+ " 1 setgray printText\n" .
+ " grestore } def\n";
+ } else {
+ print $OUT "/logo { gsave \n" .
+ " translate \n" .
+ " .95 -.05 0\n" .
+ " { dup 1 exch sub 0 exch setrgbcolor printText 1 -.5 translate } for \n" .
+ " 1 0 0 setrgbcolor printText\n" .
+ " grestore} def\n";
+ }
+
+ print $OUT "/asciilogo { 5 sub moveto HB16 setfont (GrAnSim) show } def\n";
+ print $OUT "/starside \n" .
+ " {starlen 0 lineto currentpoint translate \n" .
+ " -144 rotate } def\n";
+
+ print $OUT "/star \n" .
+ " { moveto \n" .
+ " currentpoint translate \n" .
+ " 4 {starside} repeat \n" .
+ " closepath \n" .
+ " gsave \n" .
+ " .7 setgray fill \n" .
+ " grestore \n" .
+ " % stroke \n" .
+ " } def \n";
+ #print $OUT "/get-shade % compute shade from intensity\n" .
+ # " { pop 1 exch sub 0.6 mul 0.2 add } def\n";
+ if ( $opt_m ) {
+ print $OUT "/from 0.2 def\n";
+ print $OUT "/to 0.8 def\n";
+ print $OUT "/get-shade % compute shade from intensity\n" .
+ " { pop dup 0 eq { pop idle-gray }\n " .
+ " { 1 exch sub to from sub mul from add } ifelse } def\n";
+ " { pop 1 exch sub to from sub mul from add } def\n";
+ } else {
+ print $OUT "/from 0.5 def\n";
+ print $OUT "/to 0.9 def\n";
+ }
+ print $OUT "/epsilon 0.01 def\n";
+ print $OUT "/from-blue 0.7 def\n";
+ print $OUT "/to-blue 0.95 def\n";
+ print $OUT "/m 1 def\n";
+ print $OUT "/magnify { m mul dup 1 gt { pop 1 } if } def\n";
+ print $OUT "%\n" .
+ "% show no. of runnable threads and the current degree of fetching\n" .
+ "%\n" .
+ "/get-color % compute color from intensity\n" .
+ " { 4 mul dup % give more weight to second intensity\n" .
+ " 0 eq { pop 0 exch } \n" .
+ " { from-blue to-blue sub mul from-blue add dup \n" .
+ " 1 gt { pop 1 } if exch } ifelse \n" .
+ " dup 0 eq { pop pop idle-color }\n" .
+ " { 1 exch sub to from sub mul from add % green val is top of stack\n" .
+ " exch 0 3 1 roll } ifelse } def\n";
+
+ print $OUT "%\n";
+ print $OUT "% show no. of runable threads only\n";
+ print $OUT "%\n";
+ print $OUT "/get-color-runnable % compute color from intensity\n";
+ print $OUT "{ pop dup 0 eq { pop idle-color }\n";
+ print $OUT " { 1 exch sub to from sub mul from add % green val is top of stack\n";
+ print $OUT " 0.2 0 3 1 roll } ifelse } def\n";
+
+ print $OUT "%\n";
+ print $OUT "% show no. of fetching threads only\n";
+ print $OUT "%\n";
+ print $OUT "/get-color-fetch % compute color from intensity\n";
+ print $OUT "{ exch pop dup 0 eq { pop idle-color-fetch }\n";
+ print $OUT " { 1 exch sub to from sub mul from add % blue val is top of stack\n";
+ print $OUT " 0.2 0.6 3 2 roll } ifelse } def\n";
+
+ #print $OUT "/get-color % compute color from intensity\n" .
+ # " { dup 0 eq { pop idle-color }\n" .
+ # " { 1 exch sub to from sub mul from add 0 exch 0 } ifelse } def\n";
+ # " { dup 0.4 le { 0.4 exch sub 0.2 add 2 mul 0 0 setrgbcolor} " .
+ # " { 1 exch sub 0.4 add 0 exch 0 setrgbcolor} ifelse \n" .
+ print $OUT "/R % Draw a rectangle \n" .
+ " { % Operands: x y xlen ylen i j \n" .
+ " % (x,y) left lower start point of rectangle\n" .
+ " % xlen length of rec in x direction\n" .
+ " % ylen length of rec in y direction\n" .
+ " % i intensity of rectangle [0,1] \n" .
+ " % j intensity blue to indicate fetching\n" .
+ " % (ignored in mono mode)\n" .
+ ( $opt_m ? " get-shade setgray\n"
+ : " get-color-runnable setrgbcolor\n" ) .
+ " newpath\n" .
+ " 4 copy pop pop moveto\n" .
+ " 1 index 0 rlineto\n" .
+ " 0 index 0 exch rlineto\n" .
+ " 1 index neg 0 rlineto\n" .
+ " 0 index neg 0 exch rlineto\n" .
+ " pop pop pop pop\n" .
+ " closepath\n" .
+ " fill % Note: No stroke => no border\n" .
+ " } def\n";
+ print $OUT "% /R rect def\n";
+ print $OUT "%/A % Draw an arrow (for migration graph)\n" .
+ "% { % Operands: x y x' y' \n" .
+ "% % (x,y) start point \n" .
+ "% % (x',y') end point \n" .
+ ( $opt_m ? "% 0 setgray\n" : "% 0 0 0 setrgbcolor\n" ) .
+ "% 1 setlinewidth\n" .
+ "% newpath 4 2 roll x-normalize moveto x-normalize lineto stroke } def\n";
+
+ print $OUT "/A % No arrows \n" .
+ " { pop pop pop pop } def\n";
+ print $OUT "-90 rotate\n";
+
+ print $OUT "-785 30 translate\n";
+ print $OUT "/HE10 /Helvetica findfont 10 scalefont def\n";
+ print $OUT "/HE12 /Helvetica findfont 12 scalefont def\n";
+ print $OUT "/HE14 /Helvetica findfont 14 scalefont def\n";
+ print $OUT "/TI16 /Times-Italic findfont 16 scalefont def\n";
+ print $OUT "/HB16 /Helvetica-Bold findfont 16 scalefont def\n";
+ print $OUT "% " . "-" x 77 . "\n";
+
+ print $OUT "newpath\n";
+ print $OUT "0 8.000000 moveto\n";
+ print $OUT "0 525.000000 760.000000 525.000000 8.000000 arcto\n";
+ print $OUT "4 {pop} repeat\n";
+ print $OUT "760.000000 525.000000 760.000000 0 8.000000 arcto\n";
+ print $OUT "4 {pop} repeat\n";
+ print $OUT "760.000000 0 0 0 8.000000 arcto\n";
+ print $OUT "4 {pop} repeat\n";
+ print $OUT "0 0 0 525.000000 8.000000 arcto\n";
+ print $OUT "4 {pop} repeat\n";
+ print $OUT "0.500000 setlinewidth\n";
+ print $OUT "stroke\n";
+ print $OUT "newpath\n";
+ print $OUT "4.000000 505.000000 moveto\n";
+ print $OUT "4.000000 521.000000 752.000000 521.000000 4.000000 arcto\n";
+ print $OUT "4 {pop} repeat\n";
+ print $OUT "752.000000 521.000000 752.000000 501.000000 4.000000 arcto\n";
+ print $OUT "4 {pop} repeat\n";
+ print $OUT "752.000000 501.000000 4.000000 501.000000 4.000000 arcto\n";
+ print $OUT "4 {pop} repeat\n";
+ print $OUT "4.000000 501.000000 4.000000 521.000000 4.000000 arcto\n";
+ print $OUT "4 {pop} repeat\n";
+ print $OUT "0.500000 setlinewidth\n";
+ print $OUT "stroke\n";
+
+ print $OUT "% ----------------------------------------------------------\n";
+ print $OUT "% Print pallet\n";
+ print $OUT "% NOTE: the values for the tics must correspond to start and\n";
+ print $OUT "% end values in /get-color\n";
+ print $OUT "gsave \n";
+ print $OUT "340 508 translate\n";
+ print $OUT "0.0 0.05 1.00 \n";
+ print $OUT " { \n";
+ print $OUT " dup dup \n";
+ print $OUT " from epsilon sub gt exch \n";
+ print $OUT " from epsilon add lt \n";
+ print $OUT " and\n";
+ print $OUT " { newpath " .
+ ($opt_m ? "0 setgray " : "0 0 0 setrgbcolor ") .
+ "0 0 moveto 0 -3 rlineto stroke } if\n";
+ print $OUT " dup dup \n";
+ print $OUT " to epsilon 2 mul sub gt exch \n";
+ print $OUT " to epsilon 2 mul add lt \n";
+ print $OUT " and\n";
+ print $OUT " { newpath " .
+ ($opt_m ? "0 setgray " : "0 0 0 setrgbcolor ") .
+ "10 0 moveto 0 -3 rlineto stroke } if\n";
+ print $OUT ($opt_m ? " setgray\n" : " 0 exch 0 setrgbcolor\n");
+ print $OUT " newpath\n";
+ print $OUT " 0 0 moveto\n";
+ print $OUT " 10 0 rlineto\n";
+ print $OUT " 0 10 rlineto\n";
+ print $OUT " -10 0 rlineto\n";
+ print $OUT " closepath\n";
+ print $OUT " fill\n";
+ print $OUT " 10 0 translate \n";
+ print $OUT " } for\n";
+ print $OUT "grestore\n";
+
+ print $OUT "% Print pallet for showing fetch\n";
+ print $OUT "% NOTE: the values for the tics must correspond to start and\n";
+ print $OUT "% end values in /get-color\n";
+ print $OUT "%gsave \n";
+ print $OUT "%340 508 translate\n";
+ print $OUT "%0.0 0.05 1.00 \n";
+ print $OUT "%{ \n";
+ print $OUT "% dup dup \n";
+ print $OUT "% from epsilon sub gt exch \n";
+ print $OUT "% from epsilon add lt \n";
+ print $OUT "% and\n";
+ print $OUT "% { newpath 0 0 0 setrgbcolor 0 0 moveto 0 -3 rlineto stroke } if\n";
+ print $OUT "% dup dup \n";
+ print $OUT "% to epsilon 2 mul sub gt exch \n";
+ print $OUT "% to epsilon 2 mul add lt \n";
+ print $OUT "% and\n";
+ print $OUT "% { newpath 0 0 0 setrgbcolor 10 0 moveto 0 -3 rlineto stroke } if\n";
+ print $OUT "% 0.2 exch 0.6 exch setrgbcolor \n";
+ print $OUT "% newpath\n";
+ print $OUT "% 0 0 moveto\n";
+ print $OUT "% 10 0 rlineto\n";
+ print $OUT "% 0 10 rlineto\n";
+ print $OUT "% -10 0 rlineto\n";
+ print $OUT "% closepath\n";
+ print $OUT "% fill\n";
+ print $OUT "% 10 0 translate \n";
+ print $OUT "% } for\n";
+ print $OUT "% grestore\n";
+
+ print $OUT "% Print double pallet\n";
+ print $OUT "% NOTE: the values for the tics must correspond to start and\n";
+ print $OUT "% end values in /get-color\n";
+ print $OUT "% gsave \n";
+ print $OUT "% 340 500 translate\n";
+ print $OUT "% 0.0 0.05 1.00 \n";
+ print $OUT "% { \n";
+ print $OUT "% 0 exch 0 setrgbcolor \n";
+ print $OUT "% newpath\n";
+ print $OUT "% 0 0 moveto\n";
+ print $OUT "% 10 0 rlineto\n";
+ print $OUT "% 0 10 rlineto\n";
+ print $OUT "% -10 0 rlineto\n";
+ print $OUT "% closepath\n";
+ print $OUT "% fill\n";
+ print $OUT "% 10 0 translate \n";
+ print $OUT "% } for\n";
+ print $OUT "% grestore\n";
+ print $OUT "% gsave \n";
+ print $OUT "% 340 510 translate\n";
+ print $OUT "% 0.0 0.05 1.00 \n";
+ print $OUT "% { \n";
+ print $OUT "% dup dup \n";
+ print $OUT "% from epsilon sub gt exch \n";
+ print $OUT "% from epsilon add lt \n";
+ print $OUT "% and\n";
+ print $OUT "% { newpath 0 0 0 setrgbcolor 0 3 moveto 0 -6 rlineto stroke } if\n";
+ print $OUT "% dup dup \n";
+ print $OUT "% to epsilon 2 mul sub gt exch \n";
+ print $OUT "% to epsilon 2 mul add lt \n";
+ print $OUT "% and\n";
+ print $OUT "% { newpath 0 0 0 setrgbcolor 10 3 moveto 0 -6 rlineto stroke } if\n";
+ print $OUT "% 0.7 exch 0 setrgbcolor \n";
+ print $OUT "% newpath\n";
+ print $OUT "% 0 0 moveto\n";
+ print $OUT "% 10 0 rlineto\n";
+ print $OUT "% 0 10 rlineto\n";
+ print $OUT "% -10 0 rlineto\n";
+ print $OUT "% closepath\n";
+ print $OUT "% fill\n";
+ print $OUT "% 10 0 translate \n";
+ print $OUT "% } for\n";
+ print $OUT "% grestore\n";
+ print $OUT "% ----------------------------------------------------------\n";
+ print $OUT "HE14 setfont\n";
+ print $OUT "100.000000 508.000000 moveto\n";
+ print $OUT "($pname PEs: $nPEs Lat.: $lat ) show\n";
+
+ print $OUT "($date) dup stringwidth pop 750.000000 exch sub 508.000000 moveto show\n";
+ print $OUT ( $opt_m ? "5 512 asciilogo\n" : "5 512 logo\n");
+ print $OUT "% 100 500 moveto\n";
+
+ print $OUT "0 20 translate\n";
+
+ print $OUT "HE14 setfont\n";
+ for ($i=0; $i<$nPEs; $i++) {
+ $dist = $stripes_high[$i] - $stripes_low[$i];
+ $y = $stripes_low[$i] + $dist/2;
+ # print $OUT "/starlen $dist def\n";
+ # print $OUT "gsave 2 $y star grestore\n";
+ print $OUT " 2 " . ($stripes_low[$i]+1) . " moveto ($i) show\n";
+ }
+
+ print $OUT "20 0 translate\n";
+
+ print $OUT "% Print x-axis:\n";
+ print $OUT "1 setlinewidth\n";
+ print $OUT "0 -5 moveto total-len normalize 0 rlineto stroke\n";
+ print $OUT "gsave\n" .
+ "[2 4] 1 setdash\n" .
+ "0 0 moveto 0 $total_height rlineto stroke\n" .
+ "% $x_max 0 moveto 0 $total_height rlineto stroke\n" .
+ "grestore\n";
+ print $OUT "0 total-len 10 div total-len\n" .
+ " { dup normalize dup -5 moveto 0 -2 rlineto stroke % tic\n" .
+ " -17 moveto HE10 setfont round prt-n % print label \n" .
+ " } for \n";
+
+
+ print $OUT "$x_scale $y_scale scale\n";
+
+ print $OUT "% ++++++++++++++++++++++++++++++++++++++++++++++++++\n\n";
+
+ if ( $opt_D ) {
+ print $OUT "% Debugging info : \n";
+
+ print $OUT "% Offset is: $offset\n";
+
+ print $OUT "% y_val table: \n";
+ for ($i=0; $i<$nPEs; $i++) {
+ print $OUT "% y_val of $i: $y_val[$i]\n";
+ }
+
+ print $OUT "% x-max: $x_max; y-max: $y_max\n";
+ print $OUT "% Info from header: Prg: $pname; PEs: $nPEs; Lat.: $lat\n";
+
+ print $OUT "% ++++++++++++++++++++++++++++++++++++++++++++++++++\n\n";
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_epilog {
+ local ($OUT,$x_max, $y_max) = @_;
+ local($x_scale,$y_scale);
+
+ print $OUT "showpage\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub get_x_max {
+ local ($file) = @_;
+ local ($last_line, @fs);
+
+ open (TMP,"tail -1 $file |") || die "tail -1 $file | : $!\n";
+ while (<TMP>) {
+ $last_line = $_;
+ }
+ close(TMP);
+
+ @fs = split(/[:\[\]\s]+/,$last_line);
+
+ return $fs[2];
+}
+
+# ----------------------------------------------------------------------------
+#
+#sub get_date {
+# local ($now,$today,@lt);
+#
+# @lt = localtime(time);
+# $now = join(":",reverse(splice(@lt,0,3)));
+# $today = join(".",splice(@lt,0,3));
+#
+# return $now . " on " . $today;
+#}
+#
+# ----------------------------------------------------------------------------
+
+sub get_date {
+ local ($date);
+
+ open (DATE,"date |") || die ("$!");
+ while (<DATE>) {
+ $date = $_;
+ }
+ close (DATE);
+
+ return ($date);
+}
+
+# -----------------------------------------------------------------------------
+
+sub generate_y_val_table {
+ local ($nPEs) = @_;
+ local($i, $y, $dist);
+
+ $dist = int($total_height/$nPEs);
+ for ($i=0, $y=1; $i<$nPEs; $i++, $y+=$dist) {
+ $y_val[$i] = $y + $lower_border;
+ $stripes_low[$i] = $y;
+ $stripes_high[$i] = $y+$dist-2;
+ }
+
+ # print $OUT "10 5 translate\n";
+
+ return ($dist);
+}
+
+# ----------------------------------------------------------------------------
+
+sub init {
+ local ($nPEs) = @_;
+ local($i);
+
+ for ($i=0; $i<$nPEs; $i++) {
+ if ( $opt_S ) {
+ $sparks[$i] = 0;
+ }
+ $blocked[$i] = 0;
+ $runnable[$i] = 0;
+ $fetching[$i] = 0;
+ $running[$i] = $NO_ID;
+ if ( $opt_S ) {
+ $last_sp_bg[$i] = $NO_LAST_BG;
+ }
+ $last_bg[$i] = $NO_LAST_BG;
+ $last_start[$i] = $NO_LAST_START;
+ $last_blocked[$i] = $NO_LAST_BLOCKED;
+ $last_runnable[$i] = 0;
+ #open($OUT_RA[$i], "PE". $i . ".dat") || die "PE".$i."-R.dat: $!\n";
+ #print $OUT_RA[$i] "# Number of Runnable tasks on PE $i\n";
+ #open($OUT_BA[$i], "PE". $i . ".dat") || die "PE".$i."-B.dat: $!\n";
+ #print $OUT_BA[$i] "# Number of Blocked tasks on PE $i\n";
+ }
+
+}
+
+
+# ----------------------------------------------------------------------------
+
+sub skip_header {
+ local ($FILE) = @_;
+ local($prg, $pars, $nPEs, $lat, $fetch, $in_header);
+
+ $in_header = 9;
+ while (<$FILE>) {
+ if ( $in_header == 9 ) {
+ if (/^=/) {
+ $gum_style_gr = 1;
+ $in_header = 0;
+ $prg = "????"; #
+ $pars = "-b??????"; #
+ $nPEs = $opt_p ? $opt_p : 1; #
+ $lat = $opt_l ? $opt_l : 1;
+ return ($prg, $pars, $nPEs, $lat);
+ } else {
+ $gum_style_gr = 0;
+ $in_header = 1;
+ }
+
+ }
+ $prg = $1, $pars = $2 if /^Granularity Simulation for\s+(\w+)\s+(.*)$/;
+ $nPEs = $1 if /^PEs\s+(\d+)/;
+ $lat = $1, $fetch = $2 if /^Latency\s+(\d+)[^F]+Fetch\s+(\d+)/;
+ die "Can't process GranSim-Light profiles!\n" if /^GrAnSim-Light$/i;
+
+ last if /^\+\+\+\+\+/;
+ }
+
+ return ($prg, $pars, $nPEs, $lat);
+}
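+# For reference, the header that skip_header expects looks roughly like the
+# following (reconstructed from the regexps above; exact spacing may differ):
+#   Granularity Simulation for <prog> <args>
+#   PEs 4
+#   Latency 64 ... Fetch 2
+#   +++++++++++++++
+# A leading '=' line instead marks a GUM-style profile and is handled specially.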
+
+# ----------------------------------------------------------------------------
+
+sub process_options {
+
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0): $!\n";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+ exit ;
+ }
+
+ if ( $#ARGV != 0 ) {
+ print "Usage: $0 [options] <gr-file>\n";
+ print "Use -h option to get details\n";
+ exit 1;
+ }
+
+ $input = $ARGV[0] ;
+ $input =~ s/\.gr//;
+ $input .= ".gr";
+
+ if ( $opt_o ) {
+ ($output = $opt_o) =~ s/\.ps// ;
+ $output_b = $output . "_peb.ps";
+ $output_r = $output . "_per.ps";
+ $output_mig = $output . "_mig.ps" if $opt_M;
+ $output_sp = $output . "_sp.ps" if $opt_S;
+ $output = $output . "_pe.ps";
+ #($output_b = $opt_o) =~ s/\./-b./ ;
+ #($output_r = $opt_o) =~ s/\./-r./ ;
+ #($output_mig = $opt_o) =~ s/\./-mig./ if $opt_M;
+ #($output_sp = $opt_o) =~ s/\./-sp./ if $opt_S;
+ } else {
+ ($output = $input) =~ s/\.gr// ;
+ $output_b = $output . "_peb.ps";
+ $output_r = $output . "_per.ps";
+ $output_mig = $output . "_mig.ps" if $opt_M;
+ $output_sp = $output . "_sp.ps" if $opt_S;
+ $output = $output . "_pe.ps";
+ }
+
+ if ( $opt_v ){
+ $verbose = 1;
+ }
+
+ if ( $opt_i ) {
+ $inf_block = $opt_i;
+ } else {
+ $inf_block = 20;
+ }
+
+ $RUNNABLE_file = $input;
+ $RUNNABLE_file =~ s/\.gr//;
+ $RUNNABLE_file .= "-R";
+
+ $BLOCKED_file = $input;
+ $BLOCKED_file =~ s/\.gr//;
+ $BLOCKED_file .= "-B";
+
+ $FETCHING_file = $input;
+ $FETCHING_file =~ s/\.gr//;
+ $FETCHING_file .= "-F";
+}
+
+# ----------------------------------------------------------------------------
+
+sub print_verbose_message {
+
+ print "Input file: $input\n";
+ print "Output files: $output, $output_b, $output_r; ".
+ ($opt_M ? "Migration: $output_mig" : "") .
+ ($opt_S ? "Sparks: $output_sp" : "") .
+ "\n";
+}
+
+# ----------------------------------------------------------------------------
+# Junk from draw_segment:
+#
+# if ( $type eq $RUNNING ) {
+# die "ERROR: This version should never draw a RUNNING segment!";
+# $y = $y_val[$pe];
+# $x = $last_start[$pe];
+# $width = &get_width(0, $type);
+# # $gray = 0;
+#
+# if ( $is_very_big ) {
+# $x = int($x/$shrink_x) + 1; # rounded up
+# }
+#
+# do ps_draw_hline(OUT_B,$x,$y,$time,$width);
+# do ps_draw_hline(OUT_R,$x,$y,$time,$width);
+#
+# } elsif ( $type eq $RUNNABLE ) {
+# die "ERROR: This version should never draw a RUNNABLE segment (shades are used instead)!";
+# $y = $y_val[$pe] + $offset;
+# $x = $last_runnable[$pe];
+# $width = &get_width($runnable[$pe], $type);
+#
+# if ( $is_very_big ) {
+# $x = int($x/$shrink_x) + 1; # rounded up
+# }
+#
+# # $gray = 0.5;
+# do ps_draw_hline(OUT_R,$x,$y,$time,$width);
diff --git a/ghc/utils/parallel/gr2ps.bash b/ghc/utils/parallel/gr2ps.bash
index 28099fbff0..4d4d3da3e6 100644
--- a/ghc/utils/parallel/gr2ps.bash
+++ b/ghc/utils/parallel/gr2ps.bash
@@ -1,34 +1,41 @@
#!/usr/local/bin/bash
##############################################################################
+# Time-stamp: <Wed Jul 24 1996 22:11:13 Stardate: [-31]7859.41 hwloidl>
#
# Usage: gr2ps [options] <gr-file>
#
+# Create an overall activity graph from a GrAnSim (or GUM) profile.
# Transform the log file of a GrAnSim run (a .gr file) into a quasi-parallel
-# profile (a .qp file) and then into a PostScript file, showing essentially
-# the total number of running, runnable and blocked tasks.
+# profile (a .qp file) using gr2qp and then into a PostScript file using qp2ps.
+# The generated PostScript file shows essentially the number of running,
+# runnable and blocked tasks during the execution of the program.
#
# Options:
-# -o <file> ... write PS file to <file>
+# -o <file> ... write .ps file to <file>
+# -I <str> ... queues to be displayed (in the given order) with the encoding
+# 'a' ... active (running)
+# 'r' ... runnable
+# 'b' ... blocked
+# 'f' ... fetching
+# 'm' ... migrating
+# 's' ... sparks
+# (e.g. -I "arb" shows active, runnable, blocked tasks)
# -i <int> ... info level from 1 to 7; number of queues to display
# -m ... create mono PostScript file instead a color one.
-# -O ... optimize the produced .ps w.r.t. size
+# -O ... optimise the produced .ps w.r.t. size
# NB: With this option info is lost. If there are several values
# with same x value only the first one is printed, all
# others are dropped.
# -s <str> ... print <str> in the top right corner of the generated graph
+# -S ... improved version of sorting events
+# -l <int> ... length of slice in the .ps file; (default: 100)
+# small value => less memory consumption of .ps file & script
+# -d ... Print date instead of average parallelism
# -v ... be talkative.
# -h ... print help message (this header).
#
##############################################################################
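+# Example (hypothetical file names; a minimal sketch only):
+#   gr2ps -m -I "arb" -o foo.ps foo.gr
+# turns the GrAnSim profile foo.gr into a monochrome activity graph foo.ps
+# showing the active, runnable and blocked queues.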
-##############################################################################
-# Internal comments:
-# ----------------------------------------------------------------------
-# This version works on both Suns and Alphas -- KH
-# Any volunteers to convert it to /bin/sh?
-# Next time somebody calls for volunteers I'd better keep my mouth shut ... HWL
-##############################################################################
-
progname="`basename $0`"
args="$*"
@@ -37,70 +44,90 @@ help=0
mono=""
psfile=""
debug=""
-optimize=""
-info_level=0
+optimise=""
+info_level=""
info_mask=""
string=""
+length=""
+force_date=""
+hack=""
-getopts "hvmDOSs:o:i:I:" name
+getopts "hvmDCOHSdl:s:o:i:I:" name
while [ "$name" != "?" ] ; do
case $name in
h) help=1;;
v) verb=1;;
m) mono="-m";;
D) debug="-D";;
- O) optimize="-O";;
- S) lines="-S";;
- s) string=$OPTARG;;
- i) info_level=$OPTARG;;
- I) info_mask=$OPTARG;;
+ C) check="-C";;
+ O) optimise="-O";;
+ d) force_date="-d";;
+ H) hack="-H";;
+ S) improved_sort="-S";;
+ s) string="-s $OPTARG";;
+ l) length="-l $OPTARG";;
+ i) info_level="-i $OPTARG";;
+ I) info_mask="-I $OPTARG";;
o) psfile=$OPTARG;;
esac
- getopts "hvmDOSs:o:i:I:" name
+ getopts "hvmDCOHSdl:s:o:i:I:" name
done
+opts_qp="$debug $info_level $info_mask $improved_sort "
+opts_ps="$debug $check $optimise $mono $string $length $info_level $info_mask $force_date $hack "
+
shift $[ $OPTIND - 1 ]
+if [ $help -eq 1 ]
+ then no_of_lines=`cat $0 | awk 'BEGIN { n = 0; } \
+ /^$/ { print n; \
+ exit; } \
+ { n++; }'`
+ echo "`head -$no_of_lines $0`"
+ exit
+fi
+
if [ -z "$1" ]
- then echo "usage: $progname [-m] file[.gr]"
+ then echo "Usage: $progname [options] file[.gr]"
+ echo "Use -h option for details"
exit 1;
fi
f="`basename $1 .gr`"
grfile="$f".gr
-qpfile="$f".qp
-ppfile="$f".pp
+qpfile="${TMPDIR:-.}/$f".qp
+ppfile="${TMPDIR:-.}/$f".pp
if [ -z "$psfile" ]
then psfile="$f".ps
fi
-if [ $help -eq 1 ]
- then no_of_lines=`cat $0 | awk 'BEGIN { n = 0; } \
- /^$/ { print n; \
- exit; } \
- { n++; }'`
- echo "`head -$no_of_lines $0`"
- exit
-fi
-
if [ $verb -eq 1 ]
then echo "Input file: $grfile"
echo "Quasi-parallel file: $qpfile"
echo "PP file: $ppfile"
echo "PostScript file: $psfile"
- if [ "$mono" = "-m" ]
+ if [ -n "$mono" ]
then echo "Producing monochrome PS file"
else echo "Producing color PS file"
fi
- if [ "$optimize" = "-O" ]
- then echo "Optimization is ON"
- else echo "Optimization is OFF"
+ if [ -n "$optimise" ]
+ then echo "Optimisation is ON"
+ else echo "Optimisation is OFF"
fi
- if [ "$debug" = "-D" ]
+ if [ -n "$debug" ]
then echo "Debugging is turned ON"
else echo "Debugging is turned OFF"
fi
+ if [ -n "$improved_sort" ]
+ then echo "Improved sort is turned ON"
+ else echo "Improved sort is turned OFF"
+ fi
+ verb_opt="-v "
+ opts_qp="${opts_qp} $verb_opt "
+ opts_ps="${opts_ps} $verb_opt "
+ echo "Options for gr2qp: ${opts_qp}"
+ echo "Options for qp2ps: ${opts_ps}"
fi
@@ -113,22 +140,28 @@ if [ ! -f "$grfile" ]
rm -f "$qpfile" "$psfile"
prog=`head -1 "$grfile" | sed -e 's/Granularity Simulation for //'`
echo "$prog" >| "$qpfile"
- if [ $verb -eq 1 ]; then echo "Executed program: $prog"; fi
+ if [ $verb -eq 1 ]
+ then echo "Executed program: $prog"
+ fi
date >> "$qpfile"
- date="`date`"
- cat "$grfile" | gr2qp | ghc-fool-sort | sort -n +0 -1 | ghc-unfool-sort >> "$qpfile"
+ #date="`date`" # This is the date of running the script
+ date="`tail +2 $grfile | head -1 | sed -e 's/Start time: //'`"
+ cat "$grfile" | gr2qp ${opts_qp} >> "$qpfile"
+ # Sorting is part of gr2qp now.
+ # | ghc-fool-sort | sort -n +0 -1 | ghc-unfool-sort >> "$qpfile"
# max=`tail -2 "$qpfile" | awk '!/^Number of threads:/ { print $1; }'`
- max=`tail -1 "$qpfile" | awk '{ print $1; }'`
- if [ $verb -eq 1 ]; then echo "Total runtime: $max"; fi
- opts="";
- if [ $info_level -gt 0 ]
- then opts="-i $info_level";
- fi
- if [ -n "$info_mask" ]
- then opts="-I $info_mask";
- fi
- tail +3 "$qpfile" | qp2ps $debug $optimize $mono $lines "-s" "$string" $opts "$max" "$prog" "$date" >| "$psfile"
+ xmax=`tail -1 "$qpfile" | awk '{ print $2; }'`
+ ymax=`tail -1 "$qpfile" | awk '{ print $4; }'`
+ if [ $verb -eq 1 ]
+ then echo "Total runtime: $xmax"
+ echo "Maximal number of tasks: $ymax"
+ fi
+ tail +3 "$qpfile" | qp2ps ${opts_ps} "$xmax" "$ymax" "$prog" "$date" >| "$psfile"
rm -f "$qpfile"
+ if [ $verb -eq 1 ]
+ then echo "Scaling (maybe): ps-scale-y $psfile "
+ fi
+ ps-scale-y "$psfile"
fi
diff --git a/ghc/utils/parallel/gr2qp.pl b/ghc/utils/parallel/gr2qp.pl
index c0844622d8..e87f21b1e4 100644
--- a/ghc/utils/parallel/gr2qp.pl
+++ b/ghc/utils/parallel/gr2qp.pl
@@ -1,16 +1,111 @@
+#!/usr/local/bin/perl
+##############################################################################
+# Time-stamp: <Wed Jul 24 1996 20:35:01 Stardate: [-31]7859.07 hwloidl>
+#
+# Usage: gr2qp [options]
+#
+# Filter that transforms a GrAnSim profile (a .gr file) at stdin to
+# a quasi-parallel profile (a .qp file). It is the common front-end for most
+# visualization tools (except gr2pe). It collects running,
+# runnable and blocked tasks in queues of different `colours', whose meaning
+# is:
+# G ... green; queue of all running tasks
+# A ... amber; queue of all runnable tasks
+# R ... red; queue of all blocked tasks
+# Y ... cyan; queue of fetching tasks
+# C ... crimson; queue of tasks that are being stolen
+# B ... blue; queue of all sparks
+#
+# Options:
+# -i <int> ... info level from 1 to 7; number of queues to count (see qp3ps)
+# -I <str> ... count tasks that are in one of the given queues; encoding:
+# 'a' ... active (running)
+# 'r' ... runnable
+# 'b' ... blocked
+# 'f' ... fetching
+# 'm' ... migrating
+# 's' ... sparks
+# (e.g. -I "arb" counts sum of active, runnable, blocked tasks)
+# -c ... check consistency of data (e.g. no neg. number of tasks)
+# -v ... be talkative.
+# -h ... print help message (this header).
+#
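+# For illustration: each state change is written as one .qp line of the form
+# "<time> <from><to> 0 0x<task-id>", so a task 0x2a that blocks at time 4000
+# appears (roughly) as "4000 GR 0 0x2a", i.e. it moves from the running (G)
+# queue to the blocked (R) queue. The final output line summarises Time,
+# Max_selected_tasks, Max_running_tasks and Total_tasks.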
+##############################################################################
+
+require "getopts.pl";
+
+&Getopts('hvDSci:I:');
+
+do process_options();
+
+if ( $opt_v ) {
+ do print_verbose_message();
+}
+
+# ---------------------------------------------------------------------------
+# Init
+# ---------------------------------------------------------------------------
+
+$max = 0;
+$pmax = 0;
+$ptotal = 0;
+$n = 0;
+
+$active = 0;
+$runnable = 0;
+$blocked = 0;
+$fetching = 0;
+$migrating = 0;
+$sparks = 0;
+
+$improved_sort_option = $opt_S ? "-S" : "";
+
+open (FOOL,"| ghc-fool-sort $improved_sort_option | sort -n +0 -1 | ghc-unfool-sort") || die "FOOL";
+
+$in_header = 9;
while(<>) {
+ if ( $in_header == 8 ) {
+ $start_time = $1 if /^Start-Time: (.*)$/;
+ $in_header = 0;
+ next;
+ }
+ if ( $in_header == 9 ) {
+ if (/^=/) {
+ $gum_style_gr = 1;
+ $in_header = 8;
+ next;
+ } else {
+ $gum_style_gr = 0;
+ $in_header = 1;
+ }
+
+ }
+ if (/^\++$/) {
+ $in_header=0;
+ next;
+ }
+ next if $in_header;
+ next if /^$/;
+ next if /^=/;
chop;
($PE, $pe, $time, $act, $tid, $rest) = split;
- next if $act eq 'REPLY';
+ $time =~ s/[\[\]:]//g;
+ # next if $act eq 'REPLY';
chop($tid) if $act eq 'END';
$from = $queue{$tid};
$extra = "";
if ($act eq 'START') {
$from = '*';
$to = 'G';
+ $n++;
+ if ( $n > $pmax ) { $pmax = $n; }
+ $ptotal++;
} elsif ($act eq 'START(Q)') {
$from = '*';
$to = 'A';
+ $n++;
+ if ( $n > $pmax ) { $pmax = $n; }
+ $ptotal++;
} elsif ($act eq 'STEALING') {
$to = 'C';
} elsif ($act eq 'STOLEN') {
@@ -19,6 +114,8 @@ while(<>) {
$to = 'A';
} elsif ($act eq 'FETCH') {
$to = 'Y';
+ } elsif ($act eq 'REPLY') {
+ $to = 'R';
} elsif ($act eq 'BLOCK') {
$to = 'R';
} elsif ($act eq 'RESUME') {
@@ -29,17 +126,204 @@ while(<>) {
$extra = " 0 0x0";
} elsif ($act eq 'END') {
$to = '*';
+ $n--;
+ if ( $opt_c && $n < 0 ) {
+ print STDERR "Error at time $time: neg. number of tasks: $n\n";
+ }
} elsif ($act eq 'SCHEDULE') {
$to = 'G';
} elsif ($act eq 'DESCHEDULE') {
$to = 'A';
+ # The following are only needed for spark profiling
+ } elsif (($act eq 'SPARK') || ($act eq 'SPARKAT')) {
+ $from = '*';
+ $to = 'B';
+ } elsif ($act eq 'USED') {
+ $from = 'B';
+ $to = '*';
+ } elsif ($act eq 'PRUNED') {
+ $from = 'B';
+ $to = '*';
+ } elsif ($act eq 'EXPORTED') {
+ $from = 'B';
+ $to = 'B';
+ } elsif ($act eq 'ACQUIRED') {
+ $from = 'B';
+ $to = 'B';
+ } else {
+ print STDERR "Error at time $time: unknown event $act\n";
}
$queue{$tid} = $to;
+ if ( $from eq '' ) {
+ print STDERR "Error at time $time: process $tid has no from queue\n";
+ }
if ($to ne $from) {
- print substr($time,1,length($time)-3), " ",
+ print FOOL $time, " ",
$from, $to, " 0 0x", $tid, $extra, "\n";
}
+
+ if ($to ne $from) {
+ # Compare with main loop in qp3ps
+ if ($from eq '*') {
+ } elsif ($from eq 'G') {
+ --$active;
+ } elsif ($from eq 'A') {
+ --$runnable;
+ } elsif ($from eq 'R') {
+ --$blocked;
+ } elsif ($from eq 'B') {
+ --$sparks;
+ } elsif ($from eq 'C') {
+ --$migrating;
+ } elsif ($from eq 'Y') {
+ --$fetching;
+ } else {
+ print STDERR "Illegal from char: $from at $time\n";
+ }
+
+ if ($to eq '*') {
+ } elsif ($to eq 'G') {
+ ++$active;
+ } elsif ($to eq 'A') {
+ ++$runnable;
+ } elsif ($to eq 'R') {
+ ++$blocked;
+ } elsif ($to eq 'B') {
+ ++$sparks;
+ } elsif ($to eq 'C') {
+ ++$migrating;
+ } elsif ($to eq 'Y') {
+ ++$fetching;
+ } else {
+ print STDERR "Illegal to char: $to at $time\n";
+ }
+
+ }
+
+ $curr = &count();
+ if ( $curr > $max ) {
+ $max = $curr;
+ }
+
+ if ( 0 ) {
+ print STDERR "%% $time: (act,runnable,blocked,fetch,mig,sp) = " .
+ "($active, $runnable, $blocked, $fetching, $migrating, $sparks)".
+ " max = $max\n" ;
+ }
+
+ #print STDERR "Sparks @ $time: $sparks \tCurr: $curr \tMax: $max \n" if $opt_D;
+
+ if ( $time > $tmax ) {
+ $tmax = $time;
+ }
delete $queue{$tid} if $to eq '*';
-}
+}
+
+print "Time: ", $tmax, " Max_selected_tasks: ", $max,
+ " Max_running_tasks: ", $pmax, " Total_tasks: ", $ptotal, "\n";
+
+close(FOOL);
+
+exit 0;
+
+# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+# Copied from qp3ps and slightly modified (we don't keep a list for each queue
+# but just compute the max value we get out of all calls to count during the
+# execution of the script).
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+
+sub queue_on {
+ local ($queue) = @_;
+
+ return index($show,$queue)+1;
+}
+
+# -----------------------------------------------------------------------------
+
+sub count {
+ local ($res);
+
+ $res = (($queue_on_a) ? $active : 0) +
+ (($queue_on_r) ? $runnable : 0) +
+ (($queue_on_b) ? $blocked : 0) +
+ (($queue_on_f) ? $fetching : 0) +
+ (($queue_on_m) ? $migrating : 0) +
+ (($queue_on_s) ? $sparks : 0);
+
+ return $res;
+}
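+# For example, with -I "arb" only $queue_on_a, $queue_on_r and $queue_on_b are
+# non-zero (see queue_on above), so count() returns $active+$runnable+$blocked.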
+
+# -----------------------------------------------------------------------------
+# DaH 'oH lo'lu'Qo' (Klingon: it is not used at present)
+# -----------------------------------------------------------------------------
+
+sub set_values {
+ local ($samples,
+ $active,$runnable,$blocked,$fetching,$sparks,$migrating) = @_;
+
+ $G[$samples] = $queue_on_a ? $active : 0;
+ $A[$samples] = $queue_on_r ? $runnable : 0;
+ $R[$samples] = $queue_on_b ? $blocked : 0;
+ $Y[$samples] = $queue_on_f ? $fetching : 0;
+ $B[$samples] = $queue_on_s ? $sparks : 0;
+ $C[$samples] = $queue_on_m ? $migrating : 0;
+}
+
+# -----------------------------------------------------------------------------
+
+sub process_options {
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0): $!\n";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+ exit ;
+ }
+
+ $show = "armfb";
+
+ if ( $opt_i ) {
+ $show = "a" if info_level == 1;
+ $show = "ar" if info_level == 2;
+ $show = "arb" if info_level == 3;
+ $show = "arfb" if info_level == 4;
+ $show = "armfb" if info_level == 5;
+ $show = "armfbs" if info_level == 6;
+ }
+
+ if ( $opt_I ) {
+ $show = $opt_I;
+ }
+
+ if ( $opt_v ){
+ $verbose = 1;
+ }
+
+ $queue_on_a = &queue_on("a");
+ $queue_on_r = &queue_on("r");
+ $queue_on_b = &queue_on("b");
+ $queue_on_f = &queue_on("f");
+ $queue_on_s = &queue_on("s");
+ $queue_on_m = &queue_on("m");
+}
+
+sub print_verbose_message {
+
+ print STDERR "Info-str: $show\n";
+ print STDERR "The following queues are turned on: " .
+ ( $queue_on_a ? "active, " : "") .
+ ( $queue_on_r ? "runnable, " : "") .
+ ( $queue_on_b ? "blocked, " : "") .
+ ( $queue_on_f ? "fetching, " : "") .
+ ( $queue_on_m ? "migrating, " : "") .
+ ( $queue_on_s ? "sparks" : "") .
+ "\n";
+}
diff --git a/ghc/utils/parallel/gran-extr.pl b/ghc/utils/parallel/gran-extr.pl
new file mode 100644
index 0000000000..509da499d6
--- /dev/null
+++ b/ghc/utils/parallel/gran-extr.pl
@@ -0,0 +1,2114 @@
+#!/usr/local/bin/perl
+##############################################################################
+# Last modified: Time-stamp: <Sat Oct 28 1995 23:49:48 Stardate: [-31]6509.75 hwloidl>
+# (C) Hans Wolfgang Loidl
+#
+# Usage: gran-extr [options] [<sim-file>]
+#
+# Takes a file <sim-file> generated by running the GrAnSim simulator and
+# produces data files that should be used as input for gnuplot.
+# This script produces figures for:
+# runtime of tasks
+# percentage of communication
+# heap allocation
+# number of created sparks
+# cumulative no. of tasks over runtime
+# Furthermore, it computes the correlation between runtime and heap allocation.
+#
+# Options:
+# -g <file> ... filename of granularity file to be produced; should end with
+# .dat; -global and -local will be automatically inserted for
+# other versions.
+# -c <file> ... filename of communication file to be produced; should end with
+# .dat; -global and -local will be automatically inserted for
+# other versions.
+# -s <file> ... filename of sparked-threads file to be produced; should end w/
+# .dat; -global and -local will be automatically inserted for
+# other versions.
+# -a <file> ... filename of heap alloc. file to be produced; should end with
+# .dat;
+# -f <file> ... filename of communication time file to be produced;
+# should end with .dat;
+# -p <file> ... filename of GNUPLOT file that is produced and executed.
+# -G <LIST> ... provide a list of boundaries for the Intervals used in the
+# granularity figure; must be a Perl list e.g. (10, 20, 50)
+# this is interpreted as being open to left and right.
+# -C <LIST> ... provide a list of boundaries for the Intervals used in the
+# communication figure; must be a Perl list e.g. (10, 20, 50)
+# this is interpreted as being closed to left and right.
+# -S <LIST> ... provide a list of boundaries for the Intervals used in the
+# sparked-threads figure; must be a Perl list e.g. (10, 20, 50)
+# this is interpreted as being closed to left and right.
+# -A <LIST> ... provide a list of boundaries for the Intervals used in the
+# heap alloc figure; must be a Perl list e.g. (10, 20, 50)
+# this is interpreted as being closed to left and right.
+# -F <LIST> ... provide a list of boundaries for the Intervals used in the
+# comm. time figure; must be a Perl list e.g. (10, 20, 50)
+# this is interpreted as being open to left and right.
+# -l <int> ... left margin in the produced figures.
+# -r <int> ... right margin in the produced figures.
+# -x <int> ... enlargement of figure along x-axis.
+# -y <int> ... enlargement of figure along y-axis.
+# -e <int> ... thickness of impulses in figure.
+# -i <rat> ... set the gray level of the impulses to <rat>; <rat> must be
+# between 0 and 1 with 0 meaning black.
+# -k <n> ... number of klusters (oops, clusters, I mean ;)
+# -P ... print percentage of threads rather than absolute number of
+# threads on the y axis
+# -t <file> ... use template <file> for interval settings and file names
+# Syntax of a line in the template file:
+# <flag>: <arg>
+# -T ... use smart xtics rather than GNUPLOT default x-axis naming.
+# -L ... use logarithmic scale for all figures.
+# -W ... print warnings
+# -m ... generate monochrome output
+# -h ... help; print this text.
+# -v ... verbose mode.
+#
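+# Example (hypothetical; a sketch only):
+#   gran-extr -P -T -g gran.dat <sim-file>
+# writes the granularity profile to gran.dat (plus -global/-local variants),
+# with percentages on the y-axis and smart xtics.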
+##############################################################################
+
+# ----------------------------------------------------------------------------
+# Command line processing and initialization
+# ----------------------------------------------------------------------------
+
+require "getopts.pl";
+
+&Getopts('hvWTPDmt:L:g:f:c:s:a:p:G:F:C:S:A:l:r:x:y:e:i:k:');
+
+do process_options();
+
+$OPEN_INT = 1;
+$CLOSED_INT = 0;
+
+if ( $opt_v ) {
+ do print_verbose_message ();
+}
+
+# ----------------------------------------------------------------------------
+# The real thing
+# ----------------------------------------------------------------------------
+
+open(INPUT,"<$input") || die "Couldn't open input file $input";
+
+do skip_header();
+
+$tot_total_rt = 0;
+$tot_rt = 0;
+$tot_bt = 0;
+$tot_ft = 0;
+$tot_it = 0;
+$gum_style_gr = 0;
+
+$line_no = 0;
+while (<INPUT>) {
+ next if /^--/; # Comment lines start with --
+ next if /^\s*$/; # Skip empty lines
+ $line_no++;
+ @fields = split(/[:,]/,$_);
+ $has_end = 0;
+
+ foreach $elem (@fields) {
+ foo : {
+ $pe = $1, $end = $2 , last foo if $elem =~ /^\s*PE\s+(\d+)\s+\[(\d+)\].*$/;
+ $tn = $1, $has_end = 1 , last foo if $elem =~ /^\s*END\s+(\w+).*$/;
+ # $tn = $1 , last foo if $elem =~ /^\s*TN\s+(\w+).*$/;
+ $sn = $1 , last foo if $elem =~ /^\s*SN\s+(\d+).*$/;
+ $start = $1 , last foo if $elem =~ /^\s*ST\s+(\d+).*$/;
+ $is_global = $1 , last foo if $elem =~ /^\s*EXP\s+(T|F).*$/;
+ $bbs = $1 , last foo if $elem =~ /^\s*BB\s+(\d+).*$/;
+ $ha = $1 , last foo if $elem =~ /^\s*HA\s+(\d+).*$/;
+ $rt = $1 , last foo if $elem =~ /^\s*RT\s+(\d+).*$/;
+ $bt = $1, $bc = $2 , last foo if $elem =~ /^\s*BT\s+(\d+)\s+\((\d+)\).*$/;
+ $ft = $1, $fc = $2 , last foo if $elem =~ /^\s*FT\s+(\d+)\s+\((\d+)\).*$/;
+ $lsp = $1 , last foo if $elem =~ /^\s*LS\s+(\d+).*$/;
+ $gsp = $1 , last foo if $elem =~ /^\s*GS\s+(\d+).*$/;
+ $my = $1 , last foo if $elem =~ /^\s*MY\s+(T|F).*$/;
+ }
+ }
+
+ next unless $has_end == 1;
+
+ $total_rt = $end - $start;
+ $ready_time = $total_rt - $rt - $bt - $ft;
+
+ # ------------------------------------------------------------------------
+ # Accumulate runtime, block time, fetch time and ready time over all threads
+ # ------------------------------------------------------------------------
+
+ $tot_total_rt += $total_rt;
+ $tot_rt += $rt;
+ $tot_bt += $bt;
+ $tot_ft += $ft;
+ $tot_it += $ready_time;
+
+ # ------------------------------------------------------------------------
+ # Gather statistics about `load' on the PEs
+ # ------------------------------------------------------------------------
+
+ print "WARNING: ready time of thread is <0: $ready_time\n" if $pedantic && ($ready_time <0);
+ $pe_load[$pe] += $ready_time;
+
+ if ( $opt_D ) {
+ print "Adding $ready_time to the load time of PE no. $pe yielding $pe_load[$pe]\n";
+ }
+
+ # ------------------------------------------------------------------------
+ # Gather statistics about the size of a spark site
+ # ------------------------------------------------------------------------
+
+ $site_size[$sn] += $rt;
+
+ if ( $opt_D ) {
+ print "Adding $rt to the size of site $sn yielding $site_size[$sn]\n";
+ }
+
+ # ------------------------------------------------------------------------
+ # Gather statistics about pure exec time
+ # ------------------------------------------------------------------------
+
+ push(@all_rts,$rt);
+ $sum_rt += $rt;
+ $max_rt = $rt if $rt > $max_rt;
+
+ $index = do get_index_open_int($rt,@exec_times);
+ $exec_class[$index]++;
+
+ if ( $is_global eq 'T' ) {
+ $exec_global_class[$index]++;
+ } else {
+ $exec_local_class[$index]++;
+ }
+
+ # ------------------------------------------------------------------------
+ # Gather statistics about communication time (absolute time rather than %)
+ # ------------------------------------------------------------------------
+
+ # Note: Communication time is fetch time
+
+ push(@all_fts,$ft);
+ $sum_ft += $ft;
+ $max_ft = $ft if $ft > $max_ft;
+
+ $index = do get_index_open_int($ft,@fetch_times);
+ $fetch_class[$index]++;
+
+ if ( $is_global eq 'T' ) {
+ $fetch_global_class[$index]++;
+ } else {
+ $fetch_local_class[$index]++;
+ }
+
+ # ------------------------------------------------------------------------
+ # Gather statistics about communication percentage
+ # ------------------------------------------------------------------------
+
+ $comm_perc = ( $total_rt == 0 ? 100 : (100 * $ft)/$total_rt );
+
+ push(@all_comm_percs,$comm_perc);
+ $sum_comm_perc += $comm_perc;
+ $max_comm_perc = $comm_perc if $comm_perc > $max_comm_perc;
+
+ $index = do get_index_closed_int( $comm_perc, @comm_percs );
+ if ( $index != -1 ) {
+ $comm_class[$index]++;
+ } else {
+ print "WARNING: value " . $comm_perc . " not in range (t_rt=$total_rt; ft=$ft)\n" if $pedantic;
+ $outside++;
+ }
+
+ if ( $is_global eq 'T' ) {
+ if ( $index != -1 ) {
+ $comm_global_class[$index]++;
+ } else {
+ $outside_global++;
+ }
+ } else {
+ if ( $index != -1 ) {
+ $comm_local_class[$index]++;
+ } else {
+ $outside_local++;
+ }
+ }
+
+ # ------------------------------------------------------------------------
+ # Gather statistics about locally sparked threads
+ # ------------------------------------------------------------------------
+
+ push(@all_local_sparks,$lsp);
+ $sum_local_sp += $lsp;
+ $max_local_sp = $lsp if $lsp > $max_local_sp;
+
+ $index = do get_index_open_int($lsp,@sparks);
+ $spark_local_class[$index]++;
+
+ # ------------------------------------------------------------------------
+ # Gather statistics about globally sparked threads
+ # ------------------------------------------------------------------------
+
+ push(@all_global_sparks,$gsp);
+ $sum_global_sp += $gsp;
+ $max_global_sp = $gsp if $gsp > $max_global_sp;
+
+ $index = do get_index_open_int($gsp,@sparks);
+ $spark_global_class[$index]++;
+
+ # ------------------------------------------------------------------------
+ # Add the above two entries to get the total number of sparks
+ # ------------------------------------------------------------------------
+
+ $sp = $lsp + $gsp;
+
+ push(@all_sparks,$sp);
+ $sum_sp += $sp;
+ $max_sp = $sp if $sp > $max_sp;
+
+ $index = do get_index_open_int($sp,@sparks);
+ $spark_class[$index]++;
+
+ # ------------------------------------------------------------------------
+ # Gather statistics about heap allocations
+ # ------------------------------------------------------------------------
+
+ push(@all_has,$ha);
+ $sum_ha += $ha;
+ $max_ha = $ha if $ha > $max_ha;
+
+ $index = do get_index_open_int($ha,@has);
+ $ha_class[$index]++;
+
+ # do print_line($start,$end,$is_global,$bbs,$ha,$rt,$bt,$bc,$ft,$fc,$my);
+}
+
+print STDERR "You don't want to engage me for a file with just $line_no lines, do you?(N)\n" , exit (-1) if $line_no <= 1;
+
+# ----------------------------------------------------------------------------
+
+do write_pie_chart();
+
+# ----------------------------------------------------------------------------
+# Statistics
+# ----------------------------------------------------------------------------
+
+if ( $opt_D ) {
+ print "Lengths:\n" .
+ " all_rts: $#all_rts;\n" .
+ " all_comm_percs: $#all_comm_percs;\n" .
+ " all_sparks: $#all_sparks; \n" .
+ " all_local_sparks: $#all_local_sparks; \n" .
+ " all_global_sparks: $#all_global_sparks; \n" .
+ " all_has: $#all_has\n" .
+ " all_fts: $#all_fts;\n";
+
+
+ print "No of elems in all_rts: $#all_rts with sum $sum_rt\n";
+ print "No of elems in all_comm_percs: $#all_rts with sum $sum_comm_perc\n";
+ print "No of elems in all_has: $#all_has with sum $sum_ha\n";
+ print "No of elems in all_fts: $#all_fts with sum $sum_ft\n";
+
+}
+
+do do_statistics($line_no);
+
+# Just for debugging
+# ..................
+
+if ( $opt_D ) {
+ open(FILE,">LOG") || die "Couldn't open file LOG\n";
+ printf FILE "All total runtimes (\@all_rts:)\n";
+ printf FILE "[";
+ printf FILE join(", ",@all_rts);
+ printf FILE "]\n";
+ printf FILE " Mean, std. dev: $mean_rt, $std_dev_rt\n";
+ printf FILE "-" x 70 . "\n";
+ printf FILE "All communication times (\@all_fts:)\n";
+ printf FILE "[";
+ printf FILE join(", ",@all_fts);
+ printf FILE "]\n";
+ printf FILE " Mean, std. dev: $mean_ft, $std_dev_ft\n";
+ printf FILE "-" x 70 . "\n";
+ printf FILE "All communication percentages (\@all_comm_percs:)\n";
+ printf FILE "[";
+ printf FILE join(", ",@all_comm_percs);
+ printf FILE "]\n";
+ printf FILE " Mean, std. dev: $mean_comm_perc,$std_dev_comm_perc\n";
+ printf FILE "-" x 70 . "\n";
+ printf FILE "All sparks (\@all_sparks:)\n";
+ printf FILE "[";
+ printf FILE join(", ",@all_sparks);
+ printf FILE "]\n";
+ printf FILE " Mean, std. dev: $mean_spark,$std_dev_spark\n";
+ printf FILE "-" x 70 . "\n";
+ printf FILE "All local sparks (\@all_local_sparks:)\n";
+ printf FILE "[";
+ printf FILE join(", ",@all_local_sparks);
+ printf FILE "]\n";
+ printf FILE " Mean, std. dev: $mean_local_spark,$std_dev_local_spark\n";
+ printf FILE "-" x 70 . "\n";
+ printf FILE "All global sparks (\@all_global_sparks:)\n";
+ printf FILE "[";
+ printf FILE join(", ",@all_global_sparks);
+ printf FILE "]\n";
+ printf FILE " Mean, std. dev: $mean_global_spark,$std_dev_global_spark\n";
+ printf FILE "-" x 70 . "\n";
+ printf FILE "All heap allocations (\@all_has:)\n";
+ printf FILE "[";
+ printf FILE join(", ",@all_has);
+ printf FILE "]\n";
+ printf FILE " Mean, std. dev: $mean_ha,$std_dev_ha\n";
+ printf FILE "-" x 70 . "\n";
+
+
+ printf FILE ("CORR of runtime and heap alloc: %f\n",$c_exec_ha);
+ printf FILE ("CORR of runtime and no. of sparks: %f\n",$c_exec_sp);
+ printf FILE ("CORR of heap alloc and no. sparks: %f\n",$c_ha_sp);
+ printf FILE ("CORR of runtime and local sparks: %f\n",$c_exec_lsp);
+ printf FILE ("CORR of runtime and global sparks: %f\n",$c_exec_gsp);
+ printf FILE ("CORR of heap alloc and local sparks: %f\n",$c_ha_lsp);
+ printf FILE ("CORR of heap alloc and global sparks: %f\n",$c_ha_gsp);
+ printf FILE ("CORR of runtime and communication time: %f\n",$c_exec_ft);
+ printf FILE ("CORR of heap alloc and communication time: %f\n",$c_ha_ft);
+ printf FILE ("CORR of local sparks and communication time: %f\n",$c_lsp_ft);
+ printf FILE ("CORR of global_sparks and communication time: %f\n",$c_gsp_ft);
+ close FILE;
+}
+
+if ( $opt_P ) {
+ do percentify($line_no,*exec_class);
+ do percentify($line_no,*exec_global_class);
+ do percentify($line_no,*exec_local_class);
+ do percentify($line_no,*comm_class);
+ do percentify($line_no,*comm_global_class);
+ do percentify($line_no,*comm_local_class);
+ do percentify($line_no,*spark_local_class);
+ do percentify($line_no,*spark_global_class);
+ do percentify($line_no,*ha_class);
+ do percentify($line_no,*fetch_class);
+}
+
+# Produce cumulative RT graph and other (more or less) nice graphs
+# ................................................................
+
+do sort_and_cum();
+
+# ----------------------------------------------------------------------------
+
+open(IV,">INTERVALS") || die "Couldn't open file INTERVALS\n";
+do write_interval(IV, 'G', &guess_interval(@all_rts));
+do write_interval(IV, 'C', 0, int($mean_comm_perc),
+ int($mean_comm_perc+$std_dev_comm_perc), 50);
+do write_interval(IV, 'S', &guess_interval(@all_sparks));
+do write_interval(IV, 'A', &guess_interval(@all_has));
+close(IV);
+
+# ----------------------------------------------------------------------------
+# Print results to STDOUT (mainly for testing)
+# ----------------------------------------------------------------------------
+
+if ( $opt_v ) {
+ do print_general_info();
+}
+
+# ----------------------------------------------------------------------------
+# Write results to data files to be processed by GNUPLOT
+# ----------------------------------------------------------------------------
+
+do write_data($gran_file_name, $OPEN_INT, $logscale{'g'}, $#exec_times+1,
+ @exec_times, @exec_class);
+
+do write_data($gran_global_file_name, $OPEN_INT, $logscale{'g'}, $#exec_times+1,
+ @exec_times, @exec_global_class);
+
+do write_data($gran_local_file_name, $OPEN_INT, $logscale{'g'}, $#exec_times+1,
+ @exec_times, @exec_local_class);
+
+do write_data($comm_file_name, $CLOSED_INT, $logscale{'c'}, $#comm_percs+1,
+ @comm_percs, @comm_class);
+
+do write_data($comm_global_file_name, $CLOSED_INT, $logscale{'c'}, $#comm_percs+1,
+ @comm_percs, @comm_global_class);
+
+do write_data($comm_local_file_name, $CLOSED_INT, $logscale{'c'}, $#comm_percs+1,
+ @comm_percs, @comm_local_class);
+
+do write_data($spark_file_name, $OPEN_INT, $logscale{'s'}, $#sparks+1,
+ @sparks, @spark_class);
+
+do write_data($spark_local_file_name, $OPEN_INT, $logscale{'s'}, $#sparks+1,
+ @sparks, @spark_local_class);
+
+do write_data($spark_global_file_name, $OPEN_INT, $logscale{'s'}, $#sparks+1,
+ @sparks, @spark_global_class);
+
+do write_data($ha_file_name, $OPEN_INT, $logscale{'a'}, $#has+1,
+ @has, @ha_class);
+
+do write_data($ft_file_name, $OPEN_INT, $logscale{'g'}, $#fetch_times+1,
+ @fetch_times, @fetch_class);
+
+
+# ----------------------------------------------------------------------------
+# Run GNUPLOT over the data files and create figures
+# ----------------------------------------------------------------------------
+
+do gnu_plotify($gp_file_name);
+
+print "Script finished successfully!\n";
+
+exit 0;
+
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+# ----------------------------------------------------------------------------
+# Basic Operations on the intervals
+# ----------------------------------------------------------------------------
+
+sub get_index_open_int {
+ local ($value,@list) = @_;
+ local ($index,$right);
+
+ # print "get_index: searching for index of" . $value;
+ # print " in " . join(':',@list);
+
+ $index = 0;
+ $right = $list[$index];
+ while ( ($value >= $right) && ($index < $#list) ) {
+ $index++;
+ $right = $list[$index];
+ }
+
+ return ( ($index == $#list) && ($value > $right) ) ? $index+1 : $index;
+}
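+# Worked example (illustration only): for boundaries (10, 20, 50) the open
+# intervals are  <10 | [10,20) | [20,50] | >50 , so a value of 5 yields
+# index 0, 35 yields index 2 and 60 yields index 3 (the extra "Large" bin).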
+
+# ----------------------------------------------------------------------------
+
+sub get_index_closed_int {
+ local ($value,@list) = @_;
+ local ($index,$right);
+
+ if ( ($value < $list[0]) || ($value > $list[$#list]) ) {
+ return ( -1 );
+ }
+
+ $index = 0;
+ $left = $list[$index];
+ while ( ($left <= $value) && ($index < $#list) ) {
+ $index++;
+ $left = $list[$index];
+ }
+ return ( $index-1 );
+}
+
+# ----------------------------------------------------------------------------
+# Write operations
+# ----------------------------------------------------------------------------
+
+sub write_data {
+ local ($file_name, $open_int, $logaxes, $n, @rest) = @_;
+ local (@times) = splice(@rest,0,$n);
+ local (@class) = @rest;
+
+ open(GRAN,">$file_name") || die "Couldn't open file $file_name for output";
+
+ if ( $open_int == $OPEN_INT ) {
+
+ for ($i=0,
+ $left = ( index($logaxes,"x") != -1 ? int($times[0]/2) : 0 ),
+ $right = 0;
+ $i < $n;
+ $i++, $left = $right) {
+ $right = $times[$i];
+ print GRAN int(($left+$right)/2) . " " .
+ ($class[$i] eq "" ? "0" : $class[$i]) . "\n";
+ }
+ print GRAN $times[$n-1]+(($times[$n-1]-$times[$n-2])/2) . " " .
+ ($class[$n] eq "" ? "0" : $class[$n]) . "\n";
+
+ } else {
+
+ print GRAN (((index($logaxes,"x") != -1) && ($times[0] == 0) ? int($times[1]/2) : ($times[1] + $times[0])/2) . " " . $class[0] . "\n");
+ for ($i=1; $i < $n-2; $i++) {
+ $left = $times[$i];
+ $right = $times[$i+1];
+ print(GRAN ($left+$right)/2 . " " .
+ ($class[$i] eq "" ? "0" : $class[$i]) . "\n");
+ }
+ print GRAN ($times[$n-1]+$times[$n-2])/2 . " " . $class[$n-2] if $n >= 2;
+ }
+
+ close(GRAN);
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_array {
+ local ($file_name,$n,@list) = @_;
+
+ open(FILE,">$file_name") || die "$file_name: $!";
+ for ($i=0; $i<=$#list; $i++) {
+ print FILE $i . " " . ( $list[$i] eq "" ? "0" : $list[$i] ) . "\n";
+ }
+
+ if ( $opt_D ) {
+ print "write_array: (" . join(", ",1 .. $#list) . ")\n for file $file_name returns: \n (0, $#list, &list_max(@list)\n";
+ }
+
+ return ( (0, $#list, &list_max(@list),
+ "(" . join(", ",1 .. $#list) . ")\n") );
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_cumulative_data {
+ local ($file_name1,$file_name2,@list) = @_;
+ local (@ns, @elems, @xtics, $i, $j, $n, $elem, $max_clust, $xtics_str,
+ $xstart, $xend, $file_name0);
+ local ($CLUST_SZ) = $no_of_clusters;
+
+ @ns = ();
+ @elems = ();
+ $file_name0 = $file_name1;
+ $file_name0 =~ s/\.dat$//;
+ $file_name0 .= "0.dat";
+ open(CUMM,">$file_name1") || die "Couldn't open file $file_name1 (error $!)\n";
+ open(CUMM0,">$file_name0") || die "Couldn't open file $file_name0 (error $!)\n";
+
+ print CUMM "1 0\n" unless $list[0] <= 1;
+ print CUMM0 "1 0\n" unless $list[0] <= 1;;
+
+ for ($i=0; $i <= $#list; $i++) {
+ $elem = $list[$i];
+ print CUMM ($elem) . " " . int( (100 * ($i)) / ($#list+1) ) . "\n" unless $elem == 0;
+ print CUMM0 ($elem) . " " . $i . "\n" unless $elem == 0;
+ for ($n=1; $i < $#list && $list[$i+1] == $elem; $i++, $n++) { }
+
+ print CUMM "$elem " . int( (100 * ($i+1)) / ($#list+1) ) . "\n";
+ print CUMM0 "$elem " . ($i+1) . "\n";
+
+
+ if ( $opt_D ) {
+ print "\n--> Insert: n: $n (elem $elem) in the above lists yields: \n ";
+ }
+
+ # inlined version of do insert_elem($elem, $n, $#exs, @exs, @ns)
+ for ($j=0; $j<=$#ns && $ns[$j]>$n; $j++) { }
+ if ( $j > $#ns ) {
+ push(@ns,$n);
+ push(@elems,$elem);
+ } else {
+ splice(@ns,$j,0,$n); # insert $n at pos $j and move the
+ splice(@elems,$j,0,$elem); # rest of the array to the right
+ }
+
+ if ( $opt_D ) {
+ print "[" . join(", ",@ns) . "]" . "\n and \n" .
+ "[" . join(", ",@elems) . "]\n";
+ }
+
+ }
+
+ close(CUMM);
+ close(CUMM0);
+
+ open(CLUSTERS_ALL,">" . (&dirname($file_name2)) . "CL-" .
+ &basename($file_name2))
+ || die "Couldn't open file CL-$file_name2 (error $!)\n";
+ for ($i=0; $i <= $#ns; $i++) {
+ print CLUSTERS_ALL "$elems[$i] $ns[$i]\n";
+ }
+ close(CLUSTERS_ALL);
+
+ # Interesting are only the first parts of the list (clusters!)
+ splice(@elems, $CLUST_SZ);
+ splice(@ns, $CLUST_SZ);
+
+ open(CLUSTERS,">$file_name2") || die "Couldn't open file $file_name2 (error $!)\n";
+
+ $xstart = &list_min(@elems);
+ $xend = &list_max(@elems);
+ $step = ($xend - $xstart) / ( $CLUST_SZ == 1 ? 1 : ($CLUST_SZ-1));
+
+ @xtics = ();
+ for ($i=0, $x=$xstart; $i <= $#ns; $i++, $x+=$step) {
+ print CLUSTERS "$x $ns[$i]\n";
+ push(@xtics,"\"$elems[$i]\" $x");
+ }
+ close(CLUSTERS);
+
+ $max_clust = $ns[0];
+ $xtics_str = "(" . join(", ",@xtics) . ")\n";
+
+ return ( ($xstart, $xend, $max_clust, $xtics_str) );
+}
+
+# ----------------------------------------------------------------------------
+
+sub get_xtics {
+ local ($open_int, @list) = @_;
+
+ local ($str);
+
+ if ( $open_int == $OPEN_INT ) {
+ $last = pop(@list);
+ $str = "( \">0\" 0";
+ foreach $x (@list) {
+ $str .= ", \">$x\" $x";
+ }
+ $str .= ", \"Large\" $last)\n";
+ } else {
+ $left = shift(@list);
+ $right = shift(@list) if $#list >= 0;
+ $last = pop(@list) if $#list >= 0;
+ $str = "( \"$left-$right\" " . $left;
+ $left = $right;
+ foreach $right (@list) {
+ $str .= ", \"$left-$right\" " . ($left+$right)/2;
+ $left = $right;
+ }
+ $str .= ", \"$left-$last\" " . $last .")\n" unless $last eq "";
+ }
+ return $str;
+}
+
+# ----------------------------------------------------------------------------
+
+sub print_line {
+ local ($start,$end,$is_global,$bbs,$ha,$rt,$bt,$bc,$ft,$fc,$my) = @_;
+
+ printf("START: %u, END: %u ==> tot_exec: %u\n",
+ $start,$end,$end-$start);
+ printf(" BASIC_BLOCKS: %u, HEAP_ALLOCATIONS: %u \n",$bbs,$ha);
+ printf(" TOT_EXEC: %u = RUN_TIME %u + BLOCK_TIME %u + FETCH_TIME %u\n",
+ $end-$start,$rt,$bt,$ft);
+ printf(" BLOCK_TIME %u / BLOCK_COUNT %u; FETCH_TIME %u / FETCH_COUNT %u\n",
+ $bt,$bc,$ft,$fc);
+ printf(" %s %s\n",
+ $is_global eq 'T' ? "GLOBAL" : "LOCAL",
+ $my eq 'T' ? "MANDATORY" : "NOT MANDATORY");
+}
+
+# ----------------------------------------------------------------------------
+
+sub gnu_plotify {
+ local ($gp_file_name) = @_;
+
+ local (@open_xrange,@closed_xrange,@spark_xrange,@ha_xrange, @ft_xrange,
+ $exec_xtics,$comm_perc_xtics,$spark_xtics,$has_xtics,
+ $cumu0_rts_file, $cumu0_has_file, $cumu0_fts_file);
+
+ $cumu0_rts_file = $cumulat_rts_file_name;
+ $cumu0_rts_file =~ s/\.dat$//;
+ $cumu0_rts_file .= "0.dat";
+
+ $cumu0_has_file = $cumulat_has_file_name;
+ $cumu0_has_file =~ s/\.dat$//;
+ $cumu0_has_file .= "0.dat";
+
+ $cumu0_fts_file = $cumulat_fts_file_name;
+ $cumu0_fts_file =~ s/\.dat$//;
+ $cumu0_fts_file .= "0.dat";
+
+ $cumu0_cps_file = $cumulat_cps_file_name;
+ $cumu0_cps_file =~ s/\.dat$//;
+ $cumu0_cps_file .= "0.dat";
+
+ @open_xrange = &range($OPEN_INT,$logscale{'g'},@exec_times);
+ @closed_xrange = &range($CLOSED_INT,$logscale{'c'},@comm_percs);
+ @spark_xrange = &range($OPEN_INT,$logscale{'s'},@sparks);
+ @ha_xrange = &range($OPEN_INT,$logscale{'a'},@has);
+ @ft_xrange = &range($OPEN_INT,$logscale{'f'},@fts);
+
+ $exec_xtics = $opt_T ? &get_xtics($OPEN_INT,@exec_times) : "" ;
+ $comm_perc_xtics = $opt_T ? &get_xtics($CLOSED_INT,@comm_percs) : "";
+ $spark_xtics = $opt_T ? &get_xtics($OPEN_INT,@sparks) : "";
+ $has_xtics = $opt_T ? &get_xtics($OPEN_INT,@has) : "";
+ $fts_xtics = $opt_T ? &get_xtics($OPEN_INT,@fts) : "";
+
+ open(GP_FILE,">$gp_file_name") ||
+ die "Couldn't open gnuplot file $gp_file_name for output\n";
+
+ if ( $opt_m ) {
+ print GP_FILE "set term postscript \"Roman\" 20\n";
+ } else {
+ print GP_FILE "set term postscript color \"Roman\" 20\n";
+ }
+
+ do write_gp_record(GP_FILE,
+ $gran_file_name, &dat2ps_name($gran_file_name),
+ "Granularity (pure exec. time)", $ylabel, $logscale{'g'},
+ @open_xrange,$max_rt_class,$exec_xtics);
+ do write_gp_record(GP_FILE,
+ $gran_global_file_name, &dat2ps_name($gran_global_file_name),
+ "Granularity (pure exec. time) of exported threads",
+ $ylabel, $logscale{'g'},
+ @open_xrange,$max_rt_global_class,$exec_xtics);
+ do write_gp_record(GP_FILE,
+ $gran_local_file_name, &dat2ps_name($gran_local_file_name),
+	     "Granularity (pure exec. time) of non-exported threads",
+ $ylabel,$logscale{'g'},
+ @open_xrange,$max_rt_local_class,$exec_xtics);
+
+ do write_gp_record(GP_FILE,
+ $comm_file_name, &dat2ps_name($comm_file_name),
+ "% of communication",$ylabel,$logscale{'c'},
+ @closed_xrange,$max_comm_perc_class,$comm_perc_xtics);
+ do write_gp_record(GP_FILE,
+ $comm_global_file_name, &dat2ps_name($comm_global_file_name),
+ "% of communication of exported threads",$ylabel,$logscale{'c'},
+ @closed_xrange,$max_comm_perc_global_class,$comm_perc_xtics);
+ do write_gp_record(GP_FILE,
+ $comm_local_file_name, &dat2ps_name($comm_local_file_name),
+	     "% of communication of non-exported threads",$ylabel,$logscale{'c'},
+ @closed_xrange,$max_comm_perc_local_class,$comm_perc_xtics);
+ do write_gp_record(GP_FILE,
+ $ft_file_name, &dat2ps_name($ft_file_name),
+ "Communication time", $ylabel, $logscale{'g'},
+ @open_xrange,$max_ft_class,$fts_xtics);
+
+
+ do write_gp_record(GP_FILE,
+ $spark_file_name, &dat2ps_name($spark_file_name),
+ "No. of sparks created", $ylabel, $logscale{'s'},
+ @spark_xrange,$max_spark_class,$spark_xtics);
+
+ do write_gp_record(GP_FILE,
+ $spark_local_file_name, &dat2ps_name($spark_local_file_name),
+ "No. of sparks created (parLocal)", $ylabel, $logscale{'s'},
+ @spark_xrange,$max_spark_local_class,$spark_xtics);
+
+ do write_gp_record(GP_FILE,
+ $spark_global_file_name, &dat2ps_name($spark_global_file_name),
+ "No. of sparks created (parGlobal)", $ylabel, $logscale{'s'},
+ @spark_xrange,$max_spark_global_class,$spark_xtics);
+
+ do write_gp_record(GP_FILE,
+ $ha_file_name, &dat2ps_name($ha_file_name),
+ "Heap Allocations (words)", $ylabel, $logscale{'a'},
+ @ha_xrange,$max_ha_class,$has_xtics);
+
+ do write_gp_lines_record(GP_FILE,
+ $cumulat_rts_file_name, &dat2ps_name($cumulat_rts_file_name),
+ "Cumulative pure exec. times","% of threads",
+ $logscale{'Cg'},
+ $xend_cum_rts, $yend_cum_rts,"");
+ # $xtics_cluster_rts as last arg?
+
+ do write_gp_lines_record(GP_FILE,
+ $cumulat_has_file_name, &dat2ps_name($cumulat_has_file_name),
+ "Cumulative heap allocations","% of threads",
+ $logscale{'Ca'},
+ $xend_cum_has, $yend_cum_has,"");
+ # $xtics_cluster_has as last arg?
+
+ do write_gp_lines_record(GP_FILE,
+ $cumu0_rts_file, &dat2ps_name($cumu0_rts_file),
+ "Cumulative pure exec. times","Number of threads",
+ $logscale{'Cg'},
+ $xend_cum_rts, $yend_cum0_rts,"");
+ # $xtics_cluster_rts as last arg?
+
+ do write_gp_lines_record(GP_FILE,
+ $cumu0_has_file, &dat2ps_name($cumu0_has_file),
+ "Cumulative heap allocations","Number of threads",
+ $logscale{'Ca'},
+ $xend_cum_has, $yend_cum0_has,"");
+ # $xtics_cluster_has as last arg?
+
+ do write_gp_lines_record(GP_FILE,
+ $cumulat_fts_file_name, &dat2ps_name($cumulat_fts_file_name),
+ "Cumulative communication times","% of threads",
+ $logscale{'Cg'},
+ $xend_cum_fts, $yend_cum_fts,"");
+ # $xtics_cluster_rts as last arg?
+
+ do write_gp_lines_record(GP_FILE,
+ $cumu0_fts_file, &dat2ps_name($cumu0_fts_file),
+ "Cumulative communication times","Number of threads",
+ $logscale{'Cg'},
+ $xend_cum_fts, $yend_cum0_fts,"");
+ # $xtics_cluster_rts as last arg?
+
+ do write_gp_lines_record(GP_FILE,
+ $cumulat_cps_file_name, &dat2ps_name($cumulat_cps_file_name),
+ "Cumulative communication percentages","% of threads",
+ "", # No logscale here !
+ $xend_cum_cps, $yend_cum_cps,"");
+ # $xtics_cluster_rts as last arg?
+
+ do write_gp_lines_record(GP_FILE,
+ $cumu0_cps_file, &dat2ps_name($cumu0_cps_file),
+ "Cumulative communication percentages","Number of threads",
+ "", # No logscale here !
+ $xend_cum_cps, $yend_cum0_cps,"");
+ # $xtics_cluster_rts as last arg?
+
+ do write_gp_record(GP_FILE,
+ $clust_rts_file_name, &dat2ps_name($clust_rts_file_name),
+ "Pure exec. time", "No. of threads", $logscale{'CG'},
+ $xstart_cluster_rts,$xend_cluster_rts,$max_cluster_rts,$xtics_cluster_rts);
+
+ do write_gp_record(GP_FILE,
+ $clust_has_file_name, &dat2ps_name($clust_has_file_name),
+	    "Heap allocations", "No. of threads", $logscale{'CA'},
+ $xstart_cluster_has,$xend_cluster_has,$max_cluster_has,$xtics_cluster_has);
+
+ do write_gp_record(GP_FILE,
+ $clust_fts_file_name, &dat2ps_name($clust_fts_file_name),
+ "Communication time", "No. of threads", $logscale{'CG'},
+	    $xstart_cluster_fts,$xend_cluster_fts,$max_cluster_fts,$xtics_cluster_fts);
+
+
+ do write_gp_simple_record(GP_FILE,
+ $pe_file_name, &dat2ps_name($pe_file_name),
+ "Processing Elements (PEs)", "Ready Time (not running)",
+ $logscale{'Yp'},$xstart_pe,$xend_pe,$max_pe,$xtics_pe);
+
+ do write_gp_simple_record(GP_FILE,
+ $sn_file_name, &dat2ps_name($sn_file_name),
+ "Spark sites", "Pure exec. time",
+ $logscale{'Ys'},$xstart_sn,$xend_sn,$max_sn,$xtics_sn);
+
+ close GP_FILE;
+
+ print "Gnu plotting figures ...\n";
+ system "gnuplot $gp_file_name";
+
+ print "Extending thickness of impulses ...\n";
+ do gp_ext($gran_file_name,
+ $gran_global_file_name,
+ $gran_local_file_name,
+ $comm_file_name,
+ $comm_global_file_name,
+ $comm_local_file_name,
+ $spark_file_name,
+ $spark_local_file_name,
+ $spark_global_file_name,
+ $ha_file_name,
+ $ft_file_name,
+ $clust_fts_file_name,
+ $clust_rts_file_name,
+ $clust_has_file_name,
+ $pe_file_name,
+ $sn_file_name
+ );
+
+
+}
+
+# ----------------------------------------------------------------------------
+
+sub gp_ext {
+ local (@file_names) = @_;
+ local ($file_name);
+ local ($ps_file_name);
+ local ($prg);
+
+ #$prg = system "which gp-ext-imp";
+  #print " Using script $prg for impulse extension\n";
+ $prg = $ENV{GRANDIR} ? $ENV{GRANDIR} . "/bin/gp-ext-imp"
+ : $ENV{HOME} . "/bin/gp-ext-imp" ;
+ if ( $opt_v ) {
+ print " (using script $prg)\n";
+ }
+
+ foreach $file_name (@file_names) {
+ $ps_file_name = &dat2ps_name($file_name);
+ system "$prg -w $ext_size -g $gray " .
+ $ps_file_name . " " .
+ $ps_file_name . "2" ;
+ system "mv " . $ps_file_name . "2 " . $ps_file_name;
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_gp_record {
+ local ($file,$in_file,$out_file,$xlabel,$ylabel,$logaxes,
+ $xstart,$xend,$ymax,$xtics) = @_;
+
+ if ( $xstart >= $xend ) {
+    print ("WARNING: empty xrange [$xstart:$xend] changed to [$xstart:" . ($xstart+1) . "]\n") if ( $pedantic || $opt_v );
+ $xend = $xstart + 1;
+ }
+
+ if ( $ymax <=0 ) {
+ $ymax = 2;
+ print "WARNING: empty yrange changed to [0:$ymax]\n" if ( $pedantic || $opt_v );
+ }
+
+ $str = "set size " . $xsize . "," . $ysize . "\n" .
+ "set xlabel \"" . $xlabel . "\"\n" .
+ "set ylabel \"" . $ylabel . "\"\n" .
+ ($xstart eq "" ? ""
+ : "set xrange [" . int($xstart) .":" . int($xend) . "]\n") .
+ ($ymax eq "" ? ""
+ : "set yrange [" . (index($logaxes,"y") != -1 ? 1 : 0) .
+ ":" . &list_max(2,int($ymax+$ymax/5)) . "]\n") .
+ ($xtics ne "" ? "set xtics $xtics" : "") .
+ "set tics out\n" .
+ "set border\n" .
+ "set title \"$nPEs PEs\"\n" .
+ "set nokey \n" .
+ "set nozeroaxis\n" .
+ "set format xy \"%g\"\n" .
+ (index($logaxes,"x") != -1 ?
+ "set logscale x\n" :
+ "set nologscale x\n") .
+ (index($logaxes,"y") != -1 ?
+ "set logscale y\n" :
+ "set nologscale y\n") .
+ "set output \"" . $out_file . "\"\n" .
+ "plot \"" . $in_file . "\" with impulses\n\n";
+ print $file $str;
+}
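+
+# For orientation, an abbreviated sketch of one stanza this routine writes to
+# the gnuplot file (the concrete numbers and names are invented; the real ones
+# come from the ranges and file names computed by the caller):
+#
+#     set size 1,1
+#     set xlabel "Granularity (pure exec. time)"
+#     set ylabel "No. of threads"
+#     set xrange [1:5000]
+#     set yrange [0:120]
+#     set logscale x
+#     set nologscale y
+#     set output "gran.ps"
+#     plot "gran.dat" with impulses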
+
+# ----------------------------------------------------------------------------
+
+sub write_gp_lines_record {
+ local ($file,$in_file,$out_file,$xlabel,$ylabel,$logaxes,
+ $xend,$yend,$xtics) = @_;
+
+ local ($str);
+
+ $str = "set xlabel \"" . $xlabel . "\"\n" .
+ "set ylabel \"" . $ylabel . "\"\n" .
+ "set xrange [" . ( index($logaxes,"x") != -1 ? 1 : 0 ) . ":$xend]\n" .
+ "set yrange [" . ( index($logaxes,"y") != -1 ? 1 : 0 ) . ":$yend]\n" .
+ "set border\n" .
+ "set nokey\n" .
+ ( $xtics ne "" ? "set xtics $xtics" : "" ) .
+ (index($logaxes,"x") != -1 ?
+ "set logscale x\n" :
+ "set nologscale x\n") .
+ (index($logaxes,"y") != -1 ?
+ "set logscale y\n" :
+ "set nologscale y\n") .
+ "set nozeroaxis\n" .
+ "set format xy \"%g\"\n" .
+ "set output \"" . $out_file . "\"\n" .
+ "plot \"" . $in_file . "\" with lines\n\n";
+ print $file $str;
+}
+
+
+# ----------------------------------------------------------------------------
+
+sub write_gp_simple_record {
+ local ($file,$in_file,$out_file,$xlabel,$ylabel,$logaxes,
+ $xstart,$xend,$ymax,$xtics) = @_;
+
+ $str = "set size " . $xsize . "," . $ysize . "\n" .
+ "set xlabel \"" . $xlabel . "\"\n" .
+ "set ylabel \"" . $ylabel . "\"\n" .
+ ($xstart eq "" ? ""
+ : "set xrange [" . int($xstart) .":" . int($xend) . "]\n") .
+ ($ymax eq "" ? ""
+ : "set yrange [" . (index($logaxes,"y") != -1 ? 1 : 0) .
+ ":" . &list_max(2,int($ymax+$ymax/5)) . "]\n") .
+ ($xtics ne "" ? "set xtics $xtics" : "") .
+ "set border\n" .
+ "set nokey\n" .
+ "set tics out\n" .
+ "set nozeroaxis\n" .
+ "set format xy \"%g\"\n" .
+ (index($logaxes,"x") != -1 ?
+ "set logscale x\n" :
+ "set nologscale x\n") .
+ (index($logaxes,"y") != -1 ?
+ "set logscale y\n" :
+ "set nologscale y\n") .
+ "set output \"" . $out_file . "\"\n" .
+ "plot \"" . $in_file . "\" with impulses\n\n";
+ print $file $str;
+}
+
+# ----------------------------------------------------------------------------
+
+sub dat2ps_name {
+ local ($dat_name) = @_;
+
+ $dat_name =~ s/\.dat$/\.ps/;
+ return ($dat_name);
+}
+
+# ----------------------------------------------------------------------------
+
+sub range {
+ local ($open_int, $logaxes, @ints) = @_;
+
+ local ($range, $left_margin, $right_margin);
+
+ $range = $ints[$#ints]-$ints[0];
+ $left_margin = 0; # $range/10;
+ $right_margin = 0; # $range/10;
+
+ if ( $opt_D ) {
+ print "\n==> Range: logaxes are $logaxes i.e. " .
+ (index($logaxes,"x") != -1 ? "matches x axis\n"
+ : "DOESN'T match x axis\n");
+ }
+ if ( index($logaxes,"x") != -1 ) {
+ if ( $open_int == $OPEN_INT ) {
+ return ( ($ints[0]/2-$left_margin,
+ $ints[$#ints]+($ints[$#ints]-$ints[$#ints-1])/2+$right_margin) );
+ } else {
+ return ( ( &list_max(1,$ints[0]-$left_margin),
+ $ints[$#ints]+($ints[$#ints]-$ints[$#ints-1])/2+$right_margin) );
+ }
+ } else {
+ if ( $open_int == $OPEN_INT ) {
+ return ( ($ints[0]/2-$left_margin,
+ $ints[$#ints]+($ints[$#ints]-$ints[$#ints-1])/2+$right_margin) );
+ } else {
+ return ( ($ints[0]-$left_margin,
+ $ints[$#ints]+($ints[$#ints]-$ints[$#ints-1])/2+$right_margin) );
+ }
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub percentify {
+ local ($sum,*classes) = @_;
+
+ for ($i=0; $i<=$#classes; $i++) {
+ $classes[$i] = (100 * $classes[$i]) / $sum;
+ }
+}
+
+# ----------------------------------------------------------------------------
+# ToDo: get these statistics functions from "stat.pl"
+# ----------------------------------------------------------------------------
+
+sub mean_std_dev {
+ local ($sum,@list) = @_;
+
+ local ($n, $s, $s_);
+
+ #print "\nmean_std_dev: sum is $sum ; list has length $#list";
+
+ $n = $#list+1;
+ $mean_value = $sum/$n;
+
+ $s_ = 0;
+ foreach $x (@list) {
+ $s_ += $x;
+ $s += ($mean_value - $x) ** 2;
+ }
+ if ( $sum != $s_ ) {
+ print "ERROR in mean_std_dev: provided sum is wrong " .
+ "(provided: $sum; computed: $s_)\n";
+ print " list_sum: " . &list_sum(@list) . "\n";
+ exit (2);
+ }
+
+ return ( ($mean_value, sqrt($s / ($n - 1)) ) );
+}
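+
+# Calling convention sketch (numbers invented): the precomputed sum is passed
+# first and is cross-checked against the list inside the routine, e.g.
+#     ($m, $sd) = &mean_std_dev(6, 1, 2, 3);   # same result as &_mean_std_dev(1, 2, 3)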
+
+# ----------------------------------------------------------------------------
+
+sub _mean_std_dev {
+ return ( &mean_std_dev(&list_sum(@_), @_) );
+}
+
+# ----------------------------------------------------------------------------
+# Compute covariance of 2 vectors, having their sums precomputed.
+# Input: $n ... number of all elements in @list_1 as well as in @list_2
+# (i.e. $n = $#list_1+1 = $#list_2+1).
+# $mean_1 ... mean value of all elements in @list_1
+# @list_1 ... list of integers; first vector
+# $mean_2 ... mean value of all elements in @list_2
+#         @list_2 ... list of integers; second vector
+# Output: covariance of @list_1 and @list_2
+# ----------------------------------------------------------------------------
+
+sub cov {
+ local ($n, $mean_1, @rest) = @_;
+ local (@list_1) = splice(@rest,0,$n);
+ local ($mean_2, @list_2) = @rest;
+
+ local ($i,$s,$s_1,$s_2);
+
+ for ($i=0; $i<$n; $i++) {
+ $s_1 += $list_1[$i];
+ $s_2 += $list_2[$i];
+ $s += ($mean_1 - $list_1[$i]) * ($mean_2 - $list_2[$i]);
+ }
+ if ( $mean_1 != ($s_1/$n) ) {
+ print "ERROR in cov: provided mean value is wrong " .
+ "(provided: $mean_1; computed: " . ($s_1/$n) . ")\n";
+ exit (2);
+ }
+ if ( $mean_2 != ($s_2/$n) ) {
+ print "ERROR in cov: provided mean value is wrong " .
+ "(provided: $mean_2; computed: " . ($s_2/$n) . ")\n";
+ exit (2);
+ }
+ return ( $s / ($n - 1) ) ;
+}
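+
+# Usage sketch (vectors invented): since Perl flattens list arguments, both
+# vectors travel in one flat argument list and are separated again inside &cov
+# via the element count $n, e.g.
+#     @a = (1, 2, 3);  @b = (2, 4, 6);
+#     $cov_ab = &cov(3, 2, @a, 4, @b);   # 2 and 4 are the means of @a and @b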
+
+# ----------------------------------------------------------------------------
+# Compute correlation of 2 vectors, having their sums precomputed.
+# Input: $n ... number of all elements in @list_1 as well as in @list_2
+# (i.e. $n = $#list_1+1 = $#list_2+1).
+# $sum_1 ... sum of all elements in @list_1
+# @list_1 ... list of integers; first vector
+# $sum_2 ... sum of all elements in @list_2
+#         @list_2 ... list of integers; second vector
+# Output: correlation of @list_1 and @list_2
+# ----------------------------------------------------------------------------
+
+sub corr {
+ local ($n, $sum_1, @rest) = @_;
+ local (@list_1) = splice(@rest,0,$n);
+ local ($sum_2, @list_2) = @rest;
+
+ local ($mean_1,$mean_2,$std_dev_1,$std_dev_2);
+
+ if ( $opt_D ) {
+ print "\ncorr: n=$n sum_1=$sum_1 sum_2=$sum_2\n";
+ print " list_sum of list_1=" . &list_sum(@list_1) .
+ " list_sum of list_2=" . &list_sum(@list_2) . "\n";
+ print " len of list_1=$#list_1 len of list_2=$#list_2\n";
+ }
+
+ ($mean_1, $std_dev_1) = &mean_std_dev($sum_1,@list_1);
+ ($mean_2, $std_dev_2) = &mean_std_dev($sum_2,@list_2);
+
+ if ( $opt_D ) {
+ print "corr: $mean_1, $std_dev_1; $mean_2, $std_dev_2\n";
+ }
+
+ return ( ($std_dev_1 * $std_dev_2) == 0 ?
+ 0 :
+ &cov($n, $mean_1, @list_1, $mean_2, @list_2) /
+ ( $std_dev_1 * $std_dev_2 ) );
+}
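+
+# Usage sketch, analogous to &cov but with the plain sums (cf. the calls in
+# do_statistics below):
+#     $r = &corr($#a+1, &list_sum(@a), @a, &list_sum(@b), @b);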
+
+# ----------------------------------------------------------------------------
+
+sub list_sum {
+ local (@list) = @_;
+
+ local ($sum);
+
+ foreach $x (@list) {
+ $sum += $x;
+ }
+
+ return ($sum);
+}
+
+# ----------------------------------------------------------------------------
+
+sub list_max {
+ local (@list) = @_;
+
+ local ($max) = shift;
+
+ foreach $x (@list) {
+ $max = $x if $x > $max;
+ }
+
+ return ($max);
+}
+
+# ----------------------------------------------------------------------------
+
+sub list_min {
+ local (@list) = @_;
+
+ local ($min) = shift;
+
+ foreach $x (@list) {
+ $min = $x if $x < $min;
+ }
+
+ return ($min);
+}
+
+# ----------------------------------------------------------------------------
+
+sub guess_interval {
+ local (@list) = @_ ;
+
+ local ($min,$max,$sum,$mean,$std_dev,@intervals);
+
+ $min = &list_min(@list);
+ $max = &list_max(@list);
+ $sum = &list_sum(@list);
+ ($mean, $std_dev) = &mean_std_dev($sum,@list);
+
+ @intervals = (int($mean-$std_dev),int($mean-$std_dev/2),int($mean),
+ int($mean+$std_dev/2),int($mean+$std_dev));
+
+ while ($#intervals>=0 && $intervals[0]<0) {
+ shift(@intervals);
+ }
+
+ return (@intervals);
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_interval {
+ local ($file,$flag,@intervals) = @_;
+
+ printf $file "$flag: (" . join(", ",@intervals) . ")\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub read_template {
+
+ if ( $opt_v ) {
+ print "Reading settings from template file $templ_file_name ...\n";
+ }
+
+ open(TEMPLATE,$templ_file_name) || die "Couldn't open file $templ_file_name";
+ while (<TEMPLATE>) {
+ next if /^\s*$/ || /^--/;
+ if (/^\s*G[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @exec_times = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*F[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @fetch_times = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*A[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @has = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*C[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @comm_percs = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*S[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @sparks = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*g[:,;.\s]+([\S]+)$/) {
+ ($gran_file_name,$gran_global_file_name, $gran_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*f[:,;.\s]+([\S]+)$/) {
+ ($ft_file_name,$ft_global_file_name, $ft_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*c[:,;.\s]+([\S]+)$/) {
+ ($comm_file_name, $comm_global_file_name, $comm_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*s[:,;.\s]+([\S]+)$/) {
+ ($spark_file_name, $spark_global_file_name, $spark_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*a[:,;.\s]+([\S]+)$/) {
+ ($ha_file_name, $ha_global_file_name, $ha_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*p[:,;.\s]+([\S]+)$/) {
+ $gp_file_name = $1;
+ $ps_file_name = &dat2ps_name($gp_file_name);
+
+ } elsif (/^\s*Xcorr[:,;.\s]+([\S]+)$/) {
+ $corr_file_name = $1;
+ } elsif (/^\s*Xcumulat-rts[:,;.\s]+([\S]+)$/) {
+ $cumulat_rts_file_name = $1;
+ } elsif (/^\s*Xcumulat-has[:,;.\s]+([\S]+)$/) {
+ $cumulat_has_file_name = $1;
+ } elsif (/^\s*Xcumulat-fts[:,;.\s]+([\S]+)$/) {
+ $cumulat_fts_file_name = $1;
+ } elsif (/^\s*Xcumulat-cps[:,;.\s]+([\S]+)$/) {
+ $cumulat_cps_file_name = $1;
+ } elsif (/^\s*Xclust-rts[:,;.\s]+([\S]+)$/) {
+ $clust_rts_file_name = $1;
+ } elsif (/^\s*Xclust-has[:,;.\s]+([\S]+)$/) {
+ $clust_has_file_name = $1;
+ } elsif (/^\s*Xclust-fts[:,;.\s]+([\S]+)$/) {
+ $clust_fts_file_name = $1;
+ } elsif (/^\s*Xclust-cps[:,;.\s]+([\S]+)$/) {
+ $clust_cps_file_name = $1;
+ } elsif (/^\s*Xpe[:,;.\s]+([\S]+)$/) {
+ $pe_file_name = $1;
+ } elsif (/^\s*Xsn[:,;.\s]+([\S]+)$/) {
+ $sn_file_name = $1;
+
+ } elsif (/^\s*XRTS[:,;.\s]+([\S]+)$/) {
+ $rts_file_name = $1;
+ } elsif (/^\s*XHAS[:,;.\s]+([\S]+)$/) {
+ $has_file_name = $1;
+ } elsif (/^\s*XFTS[:,;.\s]+([\S]+)$/) {
+ $fts_file_name = $1;
+ } elsif (/^\s*XLSPS[:,;.\s]+([\S]+)$/) {
+ $lsps_file_name = $1;
+ } elsif (/^\s*XGSPS[:,;.\s]+([\S]+)$/) {
+ $gsps_file_name = $1;
+ } elsif (/^\s*XCPS[:,;.\s]+([\S]+)$/) {
+ $cps_file_name = $1;
+ } elsif (/^\s*XCCPS[:,;.\s]+([\S]+)$/) {
+ $ccps_file_name = $1;
+
+ } elsif (/^\s*\-[:,;.\s]+([\S]+)$/) {
+ $input = $1;
+ } elsif (/^\s*L[:,;\s]+(.*)$/) {
+ $str = $1;
+ %logscale = ('g',"xy",'a',"xy",'Cg',"xy",'Ca',"xy",'Yp',"y",'Ys',"y") , next if $str eq ".";
+ $str =~ s/[\(\)\[\]]//g;
+ %logscale = split(/[,;. ]+/, $str);
+ } elsif (/^\s*i[:,;.\s]+([\S]+)$/) {
+ $gray = $1;
+ } elsif (/^\s*k[:,;.\s]+([\S]+)$/) {
+ $no_of_clusters = $1;
+ } elsif (/^\s*e[:,;.\s]+([\S]+)$/) {
+ $ext_size = $1;
+ } elsif (/^\s*v.*$/) {
+ $verbose = 1;
+ } elsif (/^\s*T.*$/) {
+ $opt_T = 1;
+ } elsif (/^\s*m.*$/) {
+ $opt_m = 1;
+ }
+ }
+ close(TEMPLATE);
+}
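+
+# A minimal template might look like this (purely illustrative, no such file
+# ships with the tools); lines starting with -- are comments:
+#     -- gran-extr template
+#     G: (50, 100, 200, 500)
+#     C: (0, 10, 20, 50, 100)
+#     g: gran
+#     p: gran.gp
+#     L: (g xy, Cg xy)
+#     -: nfib.gr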
+
+# ----------------------------------------------------------------------------
+
+sub mk_global_local_names {
+ local ($file_name) = @_;
+
+ $file_name .= ".dat" unless $file_name =~ /\.dat$/;
+ $global_file_name = $file_name;
+ $global_file_name =~ s/\.dat/\-global\.dat/ ;
+ $local_file_name = $file_name;
+ $local_file_name =~ s/\.dat/\-local\.dat/ ;
+
+ return ( ($file_name, $global_file_name, $local_file_name) );
+}
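+
+# e.g. (illustrative) &mk_global_local_names("gran") returns
+#     ("gran.dat", "gran-global.dat", "gran-local.dat")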
+
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+
+sub pre_process {
+ local ($lines) = @_;
+
+ local (@all_rts, @all_comm_percs, @all_sparks, @all_local_sparks,
+ @all_global_sparks, @all_has, @fields,
+ $line_no, $elem, $total_rt, $comm_perc,
+ $pe, $start, $end, $is_global, $bbs, $ha, $rt, $bt, $ft,
+ $lsp, $gsp, $my);
+
+ if ( $opt_v ) {
+ print "Preprocessing file $input ... \n";
+ }
+
+ open(INPUT,"<$input") || die "Couldn't open input file $input";
+
+ do skip_header();
+
+ $line_no = 0;
+ while (<INPUT>) {
+ $line_no++;
+ last if $line_no > $lines;
+
+ @fields = split(/,/,$_);
+
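+    # Illustrative only: judging from the patterns below, an input line carries
+    # comma-separated fields roughly of the form
+    #   PE 0, ST 100, END 900, GBL T, BB 3, HA 120, RT 600, BT 150 (2), FT 150 (3), LS 1, GS 0, MY F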
+ foreach $elem (@fields) {
+ foo : {
+ $pe = $1 , last foo if $elem =~ /^\s*PE\s+(\d+).*$/;
+ $start = $1 , last foo if $elem =~ /^\s*ST\s+(\d+).*$/;
+ $end = $1 , last foo if $elem =~ /^\s*END\s+(\d+).*$/;
+ $is_global = $1 , last foo if $elem =~ /^\s*GBL\s+(T|F).*$/;
+ $bbs = $1 , last foo if $elem =~ /^\s*BB\s+(\d+).*$/;
+ $ha = $1 , last foo if $elem =~ /^\s*HA\s+(\d+).*$/;
+ $rt = $1 , last foo if $elem =~ /^\s*RT\s+(\d+).*$/;
+ $bt = $1, $bc = $2 , last foo if $elem =~ /^\s*BT\s+(\d+)\s+\((\d+)\).*$/;
+ $ft = $1, $fc = $2 , last foo if $elem =~ /^\s*FT\s+(\d+)\s+\((\d+)\).*$/;
+ $lsp = $1 , last foo if $elem =~ /^\s*LS\s+(\d+).*$/;
+ $gsp = $1 , last foo if $elem =~ /^\s*GS\s+(\d+).*$/;
+ $my = $1 , last foo if $elem =~ /^\s*MY\s+(T|F).*$/;
+ }
+ }
+
+ $total_rt = $end - $start;
+ $comm_perc = ( $total_rt == 0 ? 100 : (100 * $ft)/$total_rt );
+ $sp = $lsp + $gsp;
+
+ push(@all_rts,$rt);
+
+ push(@all_comm_percs,$comm_perc);
+
+ push(@all_sparks,$sp);
+ push(@all_local_sparks,$lsp);
+ push(@all_global_sparks,$gsp);
+
+ push(@all_has,$ha);
+ }
+
+ close(INPUT);
+
+ @exec_times = &guess_interval(@all_rts);
+ @sparks = &guess_interval(@all_sparks);
+ @has = &guess_interval(@all_has);
+
+ ($m,$std_dev) = &_mean_std_dev(@all_comm_percs);
+ @comm_percs = (0, int($m), int($std_dev), 100) unless int($m) == 0;
+ @comm_percs = (0, 1, 2, 5, 10, 50, 100) if int($m) == 0;
+}
+
+# ----------------------------------------------------------------------------
+
+sub process_options {
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0)";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+
+ # system "cat $0 | awk 'BEGIN { n = 0; } \
+ # /^$/ { print n; \
+ # exit; } \
+ # { n++; }'"
+ exit ;
+ }
+
+ if ( $opt_W ) {
+ $pedantic = 1;
+ } else {
+ $pedantic = 0;
+ }
+
+ $input = $#ARGV == -1 ? "-" : $ARGV[0] ;
+
+ if ( $#ARGV != 0 ) {
+ #print "Usage: gran-extr [options] <sim-file>\n";
+ #print "Use -h option to get details\n";
+ #exit 1;
+
+ }
+
+
+ if ( ! $opt_t ) {
+ do pre_process(20);
+ }
+
+ if ( $opt_g ) {
+ ($gran_file_name, $gran_global_file_name, $gran_local_file_name) =
+ do mk_global_local_names($opt_g);
+ } else {
+ $gran_file_name = "gran.dat";
+ $gran_global_file_name = "gran-global.dat";
+ $gran_local_file_name = "gran-local.dat";
+ }
+
+ if ( $opt_c ) {
+ ($comm_file_name, $comm_global_file_name, $comm_local_file_name) =
+ do mk_global_local_names($opt_c);
+ } else {
+ $comm_file_name = "comm.dat";
+ $comm_global_file_name = "comm-global.dat";
+ $comm_local_file_name = "comm-local.dat";
+ }
+
+ if ( $opt_f ) {
+ ($ft_file_name, $ft_global_file_name, $ft_local_file_name) =
+	do mk_global_local_names($opt_f);
+ } else {
+ $ft_file_name = "ft.dat";
+ $ft_global_file_name = "ft-global.dat";
+ $ft_local_file_name = "ft-local.dat";
+ }
+
+ if ( $opt_s ) {
+ ($spark_file_name, $spark_global_file_name, $spark_local_file_name) =
+ do mk_global_local_names($opt_s);
+ } else {
+ $spark_file_name = "spark.dat";
+ $spark_global_file_name = "spark-global.dat";
+ $spark_local_file_name = "spark-local.dat";
+ }
+
+ if ( $opt_a ) {
+ ($ha_file_name, $ha_global_file_name, $ha_local_file_name) =
+ do mk_global_local_names($opt_a);
+ } else {
+ $ha_file_name = "ha.dat";
+ }
+
+ if ( $opt_p ) {
+ $gp_file_name = $opt_p;
+ } else {
+ $gp_file_name = "gran.gp";
+ }
+
+ $ps_file_name = &dat2ps_name($gp_file_name);
+
+ $corr_file_name = "CORR";
+ $cumulat_rts_file_name = "cumulative-rts.dat";
+ $cumulat_has_file_name = "cumulative-has.dat";
+ $cumulat_fts_file_name = "cumulative-fts.dat";
+ $cumulat_cps_file_name = "cumulative-cps.dat";
+ $clust_rts_file_name = "clusters-rts.dat";
+ $clust_has_file_name = "clusters-has.dat";
+ $clust_fts_file_name = "clusters-fts.dat";
+ $clust_cps_file_name = "clusters-cps.dat";
+ $pe_file_name = "pe.dat";
+ $sn_file_name = "sn.dat";
+
+ $pie_file_name = "Pie.ps";
+
+ $cps_file_name = "CPS";
+ $fts_file_name = "FTS";
+ $rts_file_name = "RTS";
+ $has_file_name = "HAS";
+ $lsps_file_name = "LSPS";
+ $gsps_file_name = "GSPS";
+ $ccps_file_name = "CCPS";
+
+ if ( $opt_l ) {
+ $left_margin = $opt_l;
+ } else {
+ $left_margin = 0;
+ }
+ $left_perc_margin = 0;
+
+ if ( $opt_r ) {
+ $right_margin = $opt_r;
+ } else {
+ $right_margin = 0;
+ }
+ $right_perc_margin = 0;
+
+ if ( $opt_x ) {
+ $xsize = $opt_x;
+ } else {
+ $xsize = 1;
+ }
+
+ if ( $opt_y ) {
+ $ysize = $opt_y;
+ } else {
+ $ysize = 1;
+ }
+
+ if ( $opt_e ) {
+ $ext_size = $opt_e;
+ } else {
+ $ext_size = 200;
+ }
+
+ if ( $opt_i ) {
+ $gray = $opt_i;
+ } else {
+ $gray = 0;
+ }
+
+ if ( $opt_k ) {
+ $no_of_clusters = $opt_k;
+ } else {
+ $no_of_clusters = 5;
+ }
+
+ if ( $opt_L ) {
+ $str = $opt_L;
+ $str =~ s/[\(\)\[\]]//g;
+ %logscale = split(/[,;. ]+/, $str);
+ # $logscale = $opt_L;
+ } else {
+ %logscale = (); # ('g',"xy",'a',"xy",'Cg',"xy",'Ca',"xy");
+ }
+
+# $delta = do compute_delta(@exec_times);
+# $no_of_exec_times = $#exec_times;
+
+ if ( $opt_G ) {
+ $opt_G =~ s/[\(\)\[\]]//g;
+ @exec_times = split(/[,;. ]+/, $opt_G);
+ # @exec_times = split(/[,;. ]+/, ($opt_G =~ s/[\(\)]//g));
+ } else {
+ # @exec_times = (50, 100, 200, 300, 400, 500, 700);
+ }
+
+ if ( $opt_F ) {
+ $opt_F =~ s/[\(\)\[\]]//g;
+ @fetch_times = split(/[,;. ]+/, $opt_F);
+ # @fetch_times = split(/[,;. ]+/, ($opt_F =~ s/[\(\)]//g));
+ } else {
+ # @fetch_times = (50, 100, 200, 300, 400, 500, 700);
+ }
+
+ if ( $opt_C ) {
+ $opt_C =~ s/[\(\)\[\]]//g;
+ @comm_percs = split(/[,;. ]+/, $opt_C);
+ } else {
+ # @comm_percs = (0,10,20,30,50,100);
+ }
+
+ if ( $opt_S ) {
+ $opt_S =~ s/[\(\)\[\]]//g;
+ @sparks = split(/[,;. ]+/, $opt_S);
+ } else {
+ # @sparks = (0,5,10,50);
+ }
+
+# $delta_comm = do compute_delta(@comm_percs);
+
+ if ( $opt_A ) {
+ $opt_A =~ s/[\(\)\[\]]//g;
+ @has = split(/[,;. ]+/, $opt_A);
+ } else {
+ # @has = (10, 100, 200, 300, 500, 1000);
+ }
+
+ if ( $opt_t ) {
+ $templ_file_name = ( $opt_t eq '.' ? "TEMPL" # default file name
+ : $opt_t eq ',' ? "/users/fp/hwloidl/grasp/GrAn/bin/TEMPL" # global master template
+                       : $opt_t eq '/' ? "/users/fp/hwloidl/grasp/GrAn/bin/T0" # template that throws away most of the info
+ : $opt_t );
+ do read_template();
+ # see RTS2gran for use of template-package
+ }
+
+ $ylabel = $opt_P ? "% of threads" : "No. of threads";
+}
+
+# ----------------------------------------------------------------------------
+
+sub print_verbose_message {
+
+ print "-" x 70 . "\n";
+ print "Setup: \n";
+ print "-" x 70 . "\n";
+ print "\nFilenames: \n";
+ print " Input file: $input\n";
+ print " Gran files: $gran_file_name $gran_global_file_name $gran_local_file_name\n";
+ print " Comm files: $comm_file_name $comm_global_file_name $comm_local_file_name\n";
+ print " Sparked threads file: $spark_file_name $spark_local_file_name $spark_global_file_name\n";
+ print " Heap file: $ha_file_name\n";
+ print " GNUPLOT file name: $gp_file_name Correlation file name: $corr_file_name\n";
+ print " Cumulative RT file name: $cumulat_rts_file_name \n Cumulative HA file name: $cumulat_has_file_name\n";
+ print " Cluster RT file name: $clust_rts_file_name \n Cluster HA file name: $clust_has_file_name\n";
+ print " Cumulative runtimes file name: $cumulat_rts_file_name\n";
+  print " Cumulative heap allocations file name: $cumulat_has_file_name\n";
+ print " Cluster run times file name: $clust_rts_file_name\n";
+ print " Cluster heap allocations file name: $clust_has_file_name\n";
+ print " PE load file name: $pe_file_name\n";
+ print " Site size file name: $sn_file_name\n";
+ print "\nBoundaries: \n";
+ print " Gran boundaries: @exec_times\n";
+ print " Comm boundaries: @comm_percs\n";
+ print " Sparked threads boundaries: @sparks\n";
+ print " Heap boundaries: @has\n";
+ print "\nOther pars: \n";
+ print " Left margin: $left_margin Right margin: $right_margin\n";
+ print " GP-extension: $ext_size GP xsize: $xsize GP ysize: $ysize\n";
+ print " Gray scale: $gray Smart x-tics is " . ($opt_T ? "ON" : "OFF") .
+ " Percentage y-axis is " . ($opt_P ? "ON" : "OFF") . "\n";
+ print " Log. scaling assoc list: ";
+ while (($key,$value) = each %logscale) {
+ print "$key: $value, ";
+ }
+ print "\n";
+  print " Active template file: $templ_file_name\n" if $opt_t;
+ print "-" x 70 . "\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub sort_and_cum {
+
+@sorted_rts = sort {$a <=> $b} @all_rts;
+
+($xstart_cluster_rts,$xend_cluster_rts,$max_cluster_rts,$xtics_cluster_rts) =
+ &write_cumulative_data($cumulat_rts_file_name,$clust_rts_file_name,@sorted_rts);
+
+$xend_cum_rts = pop(@sorted_rts);
+$yend_cum_rts = 100;
+$yend_cum0_rts = $#sorted_rts+1; # unpercentified cum graph
+
+open(RTS,">$rts_file_name") || die "$rts_file_name: $!";
+print RTS "Sorted list of all runtimes:\n";
+print RTS join("\n",@sorted_rts);
+close(RTS);
+
+@sorted_has = sort {$a <=> $b} @all_has;
+
+($xstart_cluster_has,$xend_cluster_has,$max_cluster_has,$xtics_cluster_has) =
+ &write_cumulative_data($cumulat_has_file_name,$clust_has_file_name,@sorted_has);
+
+$xend_cum_has = pop(@sorted_has);
+$yend_cum_has = 100;
+$yend_cum0_has = $#sorted_has+1; # unpercentified cum graph
+
+open(HAS,">$has_file_name") || die "$has_file_name: $!";
+print HAS "Sorted list of all heap allocations:\n";
+print HAS join("\n",@sorted_has);
+close(HAS);
+
+@sorted_lsps = sort {$a <=> $b} @all_local_sparks;
+
+open(LSPS,">$lsps_file_name") || die "$lsps_file_name: $!";
+print LSPS "Sorted list of all local sparks:\n";
+print LSPS join("\n",@sorted_lsps);
+close(LSPS);
+
+@sorted_gsps = sort {$a <=> $b} @all_global_sparks;
+
+open(GSPS,">$gsps_file_name") || die "$gsps_file_name: $!";
+print GSPS "Sorted list of all global sparks:\n";
+print GSPS join("\n",@sorted_gsps);
+close(GSPS);
+
+@sorted_fts = sort {$a <=> $b} @all_fts;
+
+($xstart_cluster_fts,$xend_cluster_fts,$max_cluster_fts,$xtics_cluster_fts) =
+ &write_cumulative_data($cumulat_fts_file_name,$clust_fts_file_name,@sorted_fts);
+
+$xend_cum_fts = pop(@sorted_fts);
+$yend_cum_fts = 100;
+$yend_cum0_fts = $#sorted_fts+1; # unpercentified cum graph
+
+open(FTS,">$fts_file_name") || die "$fts_file_name: $!";
+print FTS "Sorted list of all communication times:\n";
+print FTS join("\n",@sorted_fts);
+close(FTS);
+
+@sorted_comm_percs = sort {$a <=> $b} @all_comm_percs;
+
+($xstart_cluster_cps,$xend_cluster_cps,$max_cluster_cps,$xtics_cluster_cps) =
+ &write_cumulative_data($cumulat_cps_file_name,$clust_cps_file_name,@sorted_comm_percs);
+
+$xend_cum_cps = 100; # pop(@sorted_comm_percs);
+$yend_cum_cps = 100;
+$yend_cum0_cps = $#sorted_comm_percs+1; # unpercentified cum graph
+
+open(CCPS,">$ccps_file_name") || die "$ccps_file_name: $!";
+print CCPS "Sorted list of all communication percentages:\n";
+print CCPS join("\n",@sorted_comm_percs);
+close(CCPS);
+
+($xstart_pe,$xend_pe,$max_pe,$xtics_pe) =
+ &write_array($pe_file_name,$#pe_load,@pe_load);
+
+($xstart_sn,$xend_sn,$max_sn,$xtics_sn) =
+ &write_array($sn_file_name,$#site_size,@site_size);
+
+if ( $opt_D ) {
+ print "After write_array: xstart, xend, max _sn: $xstart_sn,$xend_sn,$max_sn,$xtics_sn\n";
+}
+}
+
+# ----------------------------------------------------------------------------
+# Compute statistical values (like mean, std_dev and especially corr coeff).
+# Write the important info to a file.
+# ----------------------------------------------------------------------------
+
+sub do_statistics {
+ local ($n) = @_;
+
+ if ( $n <= 1 ) {
+ print "Sorry, no statistics for just $n threads\n";
+ return -1;
+ }
+
+# Compute mean values and std deviations
+# ......................................
+
+ ($mean_rt,$std_dev_rt) = &mean_std_dev($sum_rt,@all_rts);
+ ($mean_comm_perc,$std_dev_comm_perc) = &mean_std_dev($sum_comm_perc,@all_comm_percs);
+ ($mean_spark,$std_dev_spark) = &mean_std_dev($sum_sp,@all_sparks);
+ ($mean_local_spark,$std_dev_local_spark) = &mean_std_dev($sum_local_sp,@all_local_sparks);
+ ($mean_global_spark,$std_dev_global_spark) = &mean_std_dev($sum_global_sp,@all_global_sparks);
+ ($mean_ha,$std_dev_ha) = &mean_std_dev($sum_ha,@all_has);
+ ($mean_ft,$std_dev_ft) = &mean_std_dev($sum_ft,@all_fts);
+
+# Compute correlation coefficients
+# ................................
+
+ $c_exec_ha = &corr($#all_rts+1,$sum_rt,@all_rts,$sum_ha,@all_has);
+ $c_exec_sp = &corr($#all_rts+1,$sum_rt,@all_rts,$sum_sp,@all_sparks);
+ $c_exec_lsp = &corr($#all_rts+1,$sum_rt,@all_rts,$sum_local_sp,@all_local_sparks);
+ $c_exec_gsp = &corr($#all_rts+1,$sum_rt,@all_rts,$sum_global_sp,@all_global_sparks);
+ $c_ha_sp = &corr($#all_has+1,$sum_ha,@all_has,$sum_sp,@all_sparks);
+ $c_ha_lsp = &corr($#all_has+1,$sum_ha,@all_has,$sum_local_sp,@all_local_sparks);
+ $c_ha_gsp = &corr($#all_has+1,$sum_ha,@all_has,$sum_global_sp,@all_global_sparks);
+ $c_exec_ft = &corr($#all_rts+1,$sum_rt,@all_rts,$sum_ft,@all_fts);
+ $c_ha_ft = &corr($#all_has+1,$sum_ha,@all_has,$sum_ft,@all_fts);
+ $c_lsp_ft = &corr($#all_local_sparks+1,$sum_local_sp,@all_local_sparks,$sum_ft,@all_fts);
+ $c_gsp_ft = &corr($#all_global_sparks+1,$sum_global_sp,@all_global_sparks,$sum_ft,@all_fts);
+
+# Write corr coeffs into a file
+# .............................
+
+ open(CORR,">$corr_file_name") || die "Couldn't open file $corr_file_name\n";
+ #printf CORR ("%f\n%f\n%f\n%f\n%f",$c_exec_ha,$c_exec_lsp,$c_exec_gsp,$c_ha_lsp,$c_ha_gsp) ;
+ printf CORR ("CORR of runtime and heap alloc: %f\n",$c_exec_ha);
+ printf CORR ("CORR of runtime and no. of sparks: %f\n",$c_exec_sp);
+ printf CORR ("CORR of heap alloc and no. sparks: %f\n",$c_ha_sp);
+ printf CORR ("CORR of runtime and no. of local sparks: %f\n",$c_exec_lsp);
+ printf CORR ("CORR of runtime and no. of global sparks: %f\n",$c_exec_gsp);
+ printf CORR ("CORR of heap alloc and no. local sparks: %f\n",$c_ha_lsp);
+ printf CORR ("CORR of heap alloc and no. global sparks: %f\n",$c_ha_gsp);
+ printf CORR ("CORR of runtime and communication time: %f\n",$c_exec_ft);
+ printf CORR ("CORR of heap alloc and communication time: %f\n",$c_ha_ft);
+ printf CORR ("CORR of no. of local sparks and communication time: %f\n",$c_lsp_ft);
+ printf CORR ("CORR of no. of global sparks and communication time: %f\n",$c_gsp_ft);
+ close(CORR);
+
+# These are needed later in the GNUPLOT files
+# ...........................................
+
+ $max_rt_class = &list_max(@exec_class);
+ $max_rt_global_class = &list_max(@exec_global_class);
+ $max_rt_local_class = &list_max(@exec_local_class);
+ $max_comm_perc_class = &list_max(@comm_class);
+ $max_comm_perc_global_class = &list_max(@comm_global_class);
+ $max_comm_perc_local_class = &list_max(@comm_local_class);
+ $max_spark_class = &list_max(@spark_class);
+ $max_spark_local_class = &list_max(@spark_local_class);
+ $max_spark_global_class = &list_max(@spark_global_class);
+ $max_ha_class = &list_max(@ha_class);
+ $max_ft_class = &list_max(@fetch_class);
+
+}
+
+# ----------------------------------------------------------------------------
+# This is written to STDOUT at the end of the file processing (before
+# gnuplotting and such) if the verbose option is given.
+# ----------------------------------------------------------------------------
+
+sub print_general_info {
+
+ printf("\nTotal number of lines: %d\n", $line_no);
+
+ print "\nDistribution of execution times: \n";
+ print " Intervals: " . join('|',@exec_times) . "\n";
+ print " Total: " . join('|',@exec_class) . "\n";
+ print " Global: " . join('|',@exec_global_class) . "\n";
+ print " Local: " . join('|',@exec_local_class) . "\n";
+
+ $total=0; foreach $i (@exec_class) { $total += $i ; }
+ $global=0; foreach $i (@exec_global_class) { $global += $i ; }
+ $local=0; foreach $i (@exec_local_class) { $local += $i ; }
+
+ print " Sum of classes (should be " . $line_no . "): " . $total .
+ " (global/local)=(" . $global . "/" . $local . ")\n";
+ print " Mean value: $mean_rt Std dev: $std_dev_rt\n";
+
+ print "\nPercentage of communication: \n";
+ print " Intervals: " . join('|',@comm_percs) . "\n";
+ print " Total: " . join('|',@comm_class) . "\n";
+ print " Global: " . join('|',@comm_global_class) . "\n";
+ print " Local: " . join('|',@comm_local_class) . "\n";
+ print " Values outside closed int: Total: " . $outside .
+ " Global: " . $outside_global . " Local: " . $outside_local . "\n";
+
+ $total=0; foreach $i (@comm_class) { $total += $i ; }
+ $global=0; foreach $i (@comm_global_class) { $global += $i ; }
+ $local=0; foreach $i (@comm_local_class) { $local += $i ; }
+
+ print " Sum of classes (should be " . $line_no . "): " . $total .
+ " (global/local)=(" . $global . "/" . $local . ")\n";
+ print " Mean value: $mean_comm_perc Std dev: $std_dev_comm_perc\n";
+
+ print "\nSparked threads: \n";
+ print " Intervals: " . join('|',@sparks) . "\n";
+ print " Total allocs: " . join('|',@spark_class) . "\n";
+
+ $total=0; foreach $i (@spark_class) { $total += $i ; }
+
+ print " Sum of classes (should be " . $line_no . "): " . $total . "\n";
+ print " Mean value: $mean_spark Std dev: $std_dev_spark\n";
+
+  print "\nHeap Allocations: \n";
+ print " Intervals: " . join('|',@has) . "\n";
+ print " Total allocs: " . join('|',@ha_class) . "\n";
+
+ $total=0; foreach $i (@ha_class) { $total += $i ; }
+
+ print " Sum of classes (should be " . $line_no . "): " . $total . "\n";
+ print " Mean value: $mean_ha Std dev: $std_dev_ha\n";
+ print "\n";
+ print "CORRELATION between runtimes and heap allocations: $c_exec_ha \n";
+ print "CORRELATION between runtime and no. of sparks: $c_exec_sp \n";
+ print "CORRELATION between heap alloc and no. sparks: $c_ha_sp \n";
+ print "CORRELATION between runtimes and locally sparked threads: $c_exec_lsp \n";
+ print "CORRELATION between runtimes and globally sparked threads: $c_exec_gsp \n";
+ print "CORRELATION between heap allocations and locally sparked threads: $c_ha_lsp \n";
+ print "CORRELATION between heap allocations and globally sparked threads: $c_ha_gsp \n";
+ print "CORRELATION between runtime and communication time: $c_exec_ft\n";
+ print "CORRELATION between heap alloc and communication time: $c_ha_ft\n";
+ print "CORRELATION between no. of local sparks and communication time: $c_lsp_ft\n";
+ print "CORRELATION between no. of global sparks and communication time: $c_gsp_ft\n";
+ print "\n";
+
+}
+
+# ----------------------------------------------------------------------------
+# Old (obsolete) stuff
+# ----------------------------------------------------------------------------
+#
+#for ($index=0;
+# $index <= &list_max($#spark_local_class,$#spark_local_class);
+# $index++) {
+# $spark_class[$index] = $spark_local_class[$index] + $spark_global_class[$index];
+#}
+#
+#for ($index=0, $sum_sp=0;
+# $index <= &list_max($#all_local_sparks,$#all_global_sparks);
+# $index++) {
+# $all_sparks[$index] = $all_local_sparks[$index] + $all_global_sparks[$index];
+# $sum_sp += $all_sparks[$index];
+#}
+#
+# ----------------------------------------------------------------------------
+#
+#sub compute_delta {
+# local (@times) = @_;
+#
+# return ($times[$#times] - $times[$#times-1]);
+#}
+#
+# ----------------------------------------------------------------------------
+
+sub insert_elem {
+ local ($elem,$val,$n,*list1,*list2) = @_;
+ local (@small_part, $i, $len);
+
+ if ( $opt_D ) {
+    print "Inserting val $val (with elem $elem) into the following list:\n" .
+          "@list2\n yields the lists:\n ";
+ }
+
+ for ($i=0; $i<=$#list2 && $list2[$i]>$val; $i++) { }
+ $len = $#list2 - $i + 1;
+ if ( $len == 0 ) {
+ push(@list1,$elem);
+ push(@list2,$val);
+ } else {
+ splice(@list1,$i,0,$elem);
+ splice(@list2,$i,0,$val);
+ }
+
+ if ( $opt_D ) {
+    print "@list1\n and \n@list2\n";
+ }
+
+}
+
+# ----------------------------------------------------------------------------
+
+sub skip_header {
+ local ($in_header);
+
+ $in_header = 9;
+ while (<INPUT>) {
+    if ( $in_header == 9 ) {
+ if (/^=/) {
+ $gum_style_gr = 1;
+ $in_header = 0;
+ $prg = "????"; #
+ $pars = "-b??????"; #
+ $nPEs = 1; #
+ $lat = 1;
+ return ($prg, $pars, $nPEs, $lat);
+ } else {
+ $gum_style_gr = 0;
+ $in_header = 1;
+ }
+
+ }
+ $prg = $1, $pars = $2 if /^Granularity Simulation for\s+(\w+)\s+(.*)$/;
+ $nPEs = $1 if /^PEs\s+(\d+)/;
+ $lat = $1, $fetch = $2 if /^Latency\s+(\d+)[^F]+Fetch\s+(\d+)/;
+
+ last if /^\+\+\+\+\+/;
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub write_pie_chart {
+ local ($rt_perc, $bt_perc, $ft_perc, $it_perc);
+ local ($title, $title_sz, $label_sz, $x_center, $y_center, $radius);
+
+ $PieChart = "/users/fp/hwloidl/grasp/GrAn/bin/PieChart.ps";
+
+ $title = "Original Glaswegian Communication Pie (tm)";
+ $title_sz = 24;
+ $label_sz = 12;
+ $x_center = 300;
+ $y_center = 400;
+ $radius = 100;
+
+ open(PIE,">$pie_file_name") || die "$pie_file_name: $!";
+
+ print PIE "%!PS-Adobe-2.0\n";
+ print PIE "%%Title: Pie Chart\n";
+ print PIE "%%Creator: gran-extr\n";
+ print PIE "%%CreationDate: Ides of March 44 B.C.\n";
+ print PIE "%%EndComments\n";
+ print PIE "\n";
+ print PIE "% Def of PieChart is taken from:\n";
+ print PIE "% ($PieChart) run\n";
+ print PIE "\n";
+
+ open(PIE_CHART,"<$PieChart") || die "$PieChart: $!";
+ while (<PIE_CHART>){
+ print PIE $_;
+ }
+ close (PIE_CHART);
+ print PIE "\n";
+
+ $rt_perc = $tot_rt / $tot_total_rt;
+ $bt_perc = $tot_bt / $tot_total_rt;
+ $ft_perc = $tot_ft / $tot_total_rt;
+ $it_perc = $tot_it / $tot_total_rt;
+
+ print PIE "($title) $title_sz $label_sz % Title, title size and label size\n" .
+            "[ % PS Array of (description, percentage [0, .., 1])\n" .
+ "[(Run Time) $rt_perc]\n" .
+ "[(Block Time) $bt_perc]\n" .
+ "[(Fetch Time) $ft_perc]\n" .
+ "[(Ready Time) $it_perc]\n" .
+ "] $x_center $y_center $radius DrawPieChart\n";
+ print PIE "showpage\n";
+
+ close(PIE);
+}
+
+# ----------------------------------------------------------------------------
+
+sub basename {
+ local ($in_str) = @_;
+ local ($str,$i) ;
+
+ $i = rindex($in_str,"/");
+ if ($i == -1) {
+ $str = $in_str;
+ } else {
+ $str = substr($in_str,$i+1) ;
+ }
+
+ return $str;
+}
+
+# ----------------------------------------------------------------------------
+
+sub dirname {
+ local ($in_str) = @_;
+ local ($str,$i) ;
+
+ $i = rindex($in_str,"/");
+ if ($i == -1) {
+ $str = "";
+ } else {
+ $str = substr($in_str,0,$i+1) ;
+ }
+
+ return $str;
+}
+
+# ----------------------------------------------------------------------------
+
diff --git a/ghc/utils/parallel/grs2gr.pl b/ghc/utils/parallel/grs2gr.pl
index d30c7777ce..ab398a53d9 100644
--- a/ghc/utils/parallel/grs2gr.pl
+++ b/ghc/utils/parallel/grs2gr.pl
@@ -1,3 +1,5 @@
+#!/usr/local/bin/perl
+
#
# Convert several .gr files (from the same GUM run) into a single
# .gr file with all times adjusted relative to the earliest start
@@ -9,13 +11,14 @@ $count = 0;
foreach $i (@ARGV) {
open(GR, $i) || die "Can't read $i\n";
$cmd = <GR>;
+ $dateline = <GR>;
$start = <GR>;
($pe, $timestamp) = ($start =~ /PE\s+(\d+) \[(\d+)\]/);
die "PE $pe too high\n" if $pe > $#ARGV;
$proc[$count++] = $pe;
$prog[$pe] = $cmd;
$time[$pe] = $timestamp;
- close(GR) || die "Can't close $i\n";
+ close(GR);
}
$basetime = 0;
@@ -28,16 +31,18 @@ for($i = 0; $i < $count; $i++) {
}
print $cmd;
+print $dateline;
for($i = 0; $i < $count; $i++) {
$pe = $proc[$i];
$delta = $time[$pe] - $basetime;
open(GR, $ARGV[$i]) || die "Can't read $ARGV[i]\n";
$cmd = <GR>;
+ $dateline = <GR>;
$start = <GR>;
while(<GR>) {
/PE\s+(\d+) \[(\d+)\]/;
printf "PE %2u [%lu]%s", $1, $2 + $delta, $';
}
- close(GR) || die "Can't close $ARGV[$i]\n";
+ close(GR);
}
diff --git a/ghc/utils/parallel/ps-scale-y.pl b/ghc/utils/parallel/ps-scale-y.pl
new file mode 100644
index 0000000000..0e1242081c
--- /dev/null
+++ b/ghc/utils/parallel/ps-scale-y.pl
@@ -0,0 +1,188 @@
+#!/usr/local/bin/perl
+##############################################################################
+# Time-stamp: <Wed Jul 24 1996 22:19:02 Stardate: [-31]7859.44 hwloidl>
+#
+# Usage: ps-scale-y [options] <file>
+#
+# It is assumed that the last line of <file> is of the format:
+# %% y_scaling: <f> max: <n>
+# where <f> is a floating point number determining the amount of scaling of
+# the y-axis of the graph that is necessary. <n> is the real maximal number
+# of tasks in the program (needed to rebuild y-axis). This script replaces the
+# definitions of the PostScript functions scale-y and unscale-y in <file> by
+# new definitions that do the right amount of scaling.
+# The y-axis is rebuilt (using the above maximal number of tasks and a copy
+# of the print_y_axis routine from qp2ps).
+# If the above line doesn't exist, <file> is unchanged.
+# This script is typically called from gr2ps.
+#
+##############################################################################
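+
+# For example (values invented), the trailer left by the profile generator
+# might read
+#     %% y_scaling: 1.538462 max: 42
+# in which case scale-y/unscale-y are redefined to rescale the profile
+# vertically by that factor and the y-axis is redrawn for 42 tasks.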
+
+require "getopts.pl";
+
+&Getopts('hv');
+
+do process_options();
+
+$tmpfile = ",t";
+$debug = 0;
+
+# NB: This must be the same as in qp2ps!!
+
+$xmin = 100;
+$xmax = 790;
+
+$scalex = $xmin;
+$labelx = $scalex - 45;
+$markx = $scalex - 30;
+$major = $scalex - 5;
+$majorticks = 10;
+
+$mmax = 1;
+
+$amax = 0;
+$ymin = 50;
+$ymax = 500;
+
+# Extract the y-scaling factor and the real task maximum from the trailer line of <file>
+open (GET_SCALING,"cat $file | tail -1 |") || die "Can't open pipe: $file | tail -1 |\n";
+
+$y_scaling = 1.0;
+
+while (<GET_SCALING>){
+ # print STDERR $_;
+ if (/^\%\%\s+y_scaling:\s+([0-9\.]+)\s+max:\s+(\d+)/) {
+ $y_scaling = $1;
+ $pmax = $2;
+ $y_translate = 1.0 - $y_scaling;
+ }
+}
+close (GET_SCALING);
+
+if ( $y_scaling != 1.0 ) {
+ print STDERR "Scaling $file ($y_scaling; $pmax tasks) ...\n" if $opt_v;
+ # print STDERR "SCALING NECESSARY: y_scaling = $y_scaling; y_translate = $y_translate !\n";
+} else {
+ # No scaling necessary!!
+ exit 0;
+}
+
+
+open (IN,"<$file") || die "Can't open file $file\n";
+open (OUT,">$tmpfile") || die "Can't open file $tmpfile\n";
+
+$skip = 0;
+while (<IN>) {
+ $skip = 0 if $skip && /^% End Y-Axis.$/;
+ next if $skip;
+ if (/\/scale\-y/) {
+ print OUT "/scale-y { gsave\n" .
+ " 0 50 $y_translate mul translate\n" .
+ " 1 $y_scaling scale } def\n";
+ }
+ elsif (/\/unscale\-y/) {
+ print OUT "/unscale-y { grestore } def \n";
+ } else {
+ print OUT $_;
+ }
+ if (/^% Y-Axis:$/) {
+ $skip = 1;
+ do print_y_axis();
+ }
+}
+
+close (IN);
+close (OUT);
+
+rename($tmpfile,$file);
+
+exit 0;
+
+# ###########################################################################
+# Same as in qp2ps (but printing to OUT)!
+# ###########################################################################
+
+sub print_y_axis {
+ local ($i);
+ local ($y, $smax,$majormax, $majorint);
+
+# Y-axis label
+
+ print OUT "% " . ("-" x 75) . "\n";
+ print OUT "% Y-Axis (scaled):\n";
+ print OUT "% " . ("-" x 75) . "\n";
+
+ print OUT ("%scale-y % y-axis outside scaled area if ps-scale-y rebuilds it!\n");
+
+ print OUT ("gsave\n");
+ print OUT ("HE12 setfont\n");
+ print OUT ("(tasks)\n");
+ print OUT ("dup stringwidth pop\n");
+ print OUT ("$ymax\n");
+ print OUT ("exch sub\n");
+ print OUT ("$labelx exch\n");
+ print OUT ("translate\n");
+ print OUT ("90 rotate\n");
+ print OUT ("0 0 moveto\n");
+ print OUT ("show\n");
+ print OUT ("grestore\n");
+
+# Scale
+
+ if ($pmax < $majorticks) {
+ $majorticks = $pmax;
+ }
+
+ print OUT ("HE12 setfont\n$scalex $ymin moveto\n$scalex $ymax lineto\n");
+ print OUT ("% Max number of tasks: $pmax\n");
+ print OUT ("% Number of ticks: $majorticks\n");
+
+ print OUT "0.5 setlinewidth\n";
+
+ $y = $ymax; # (($pmax - $ymin)/$majorticks) * ($majorticks-$i) + $ymin;
+ print OUT ("$scalex $y moveto\n$major $y lineto\n");
+ print OUT ("$markx $y moveto\n($pmax) show\n");
+
+ $majormax = int($pmax/$majorticks)*$majorticks;
+ $smax = $majormax*(($ymax-$ymin)/$pmax)+$ymin;
+ $majorint = $majormax/$majorticks;
+
+ for($i=1; $i <= $majorticks; ++$i) {
+ $y = (($smax - $ymin)/$majorticks) * ($majorticks-$i) + $ymin;
+ $majorval = int($majorint * ($majormax/$majorint-$i));
+ print OUT ("$scalex $y moveto\n$major $y lineto\n");
+ print OUT ("$markx $y moveto\n($majorval) show\n");
+ }
+
+ # print OUT ("$xmin $ymax moveto\n10 0 rlineto\n10 0 rmoveto\n($pmax) show\n");
+ print OUT " stroke\n";
+ print OUT "1 setlinewidth\n";
+ print OUT ("%unscale-y\n");
+ print OUT ("% End Y-Axis (scaled).\n");
+ print OUT "% " . ("-" x 75) . "\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub process_options {
+
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0): $!\n";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+ exit ;
+ }
+
+ if ( $#ARGV != 0 ) {
+ print "Usage: $0 [options] <file>\n";
+ print "Use -h option to get details\n";
+ exit 1;
+ }
+
+ $file = $ARGV[0];
+}
diff --git a/ghc/utils/parallel/qp2ap.pl b/ghc/utils/parallel/qp2ap.pl
new file mode 100644
index 0000000000..b3c3bcf122
--- /dev/null
+++ b/ghc/utils/parallel/qp2ap.pl
@@ -0,0 +1,495 @@
+#! /usr/local/bin/perl
+##############################################################################
+# Time-stamp: <Wed Jul 24 1996 22:05:31 Stardate: [-31]7859.39 hwloidl>
+#
+# Usage: qp2ap [options] <max-x> <max-y> <prg> <date>
+#
+# Filter that transforms a quasi-parallel profile (a .qp file) at stdin to
+# a PostScript file at stdout, showing an activity profile with one horizontal
+# line for each task (thickness of the line shows if it's active or suspended).
+#
+# Options:
+# -o <file> ... write .ps file to <file>
+# -m ... create mono PostScript file instead of a color one.
+# -O ... optimise i.e. try to minimise the size of the .ps file.
+# -s <n> ... scaling factor of y axis (default: 1)
+# -w <n> ... width of lines denoting running threads (default: 2)
+# -v ... be talkative.
+# -h ... print help message (this header).
+#
+##############################################################################
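+
+# Typical invocation sketch (file names and numbers invented); judging by the
+# usage line above, <max-x> is the end time of the profile and <max-y> the
+# total number of tasks:
+#     qp2ap -O -m 100000 42 nfib "Fri Jul 26 1996" < nfib.qp > nfib.ap.ps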
+
+
+require "getopts.pl";
+
+&Getopts('hvms:w:OlD');
+
+do process_options();
+
+if ( $opt_v ) {
+ do print_verbose_message();
+}
+
+# ---------------------------------------------------------------------------
+# Init
+# ---------------------------------------------------------------------------
+
+$y_scaling = 0;
+$gtid = 1; # number of process so far = $gtid-1
+
+$xmin = 100;
+$xmax = 790;
+
+$scalex = $xmin;
+$labelx = $scalex - 45;
+$markx = $scalex - 30;
+$major = $scalex - 5;
+$majorticks = 10;
+
+# $pmax = 40;
+$ymin = 50;
+$ymax = 500;
+
+if ( ($ymax - $ymin)/$pmax < 3 ) {
+ print STDERR "Warning: Too many tasks! Distance will be smaller than 3 pixels.\n";
+}
+
+if ( !$width ) {
+ $width = 2/3 * ($ymax - $ymin)/$pmax;
+}
+
+do write_prolog();
+do print_y_axis();
+
+# ---------------------------------------------------------------------------
+# Main Part
+# ---------------------------------------------------------------------------
+
+while(<STDIN>) {
+ next if /^[^0-9]/; # ignore lines not beginning with a digit (esp. last)
+ chop;
+ ($time, $event, $tid, $addr, $tid2, $addr2) = split;
+
+ if ( $event eq "*G") {
+ $TID{$addr} = $gtid++;
+ $START{$addr} = $time;
+ }
+
+ elsif ($event eq "*A") {
+ $TID{$addr} = $gtid++;
+ $SUSPEND{$addr} = $time;
+ }
+
+ elsif ($event eq "G*" || $event eq "GR" ) {
+ do psout($START{$addr},$time,$TID{$addr},"runlineto");
+# $STOP{$addr} = $time;
+ }
+
+ elsif ($event eq "GA" || $event eq "GC" || $event eq "GY") {
+ do psout($START{$addr},$time,$TID{$addr},"runlineto");
+ $SUSPEND{$addr} = $time;
+ }
+
+ elsif ($event eq "RA") {
+ $SUSPEND{$addr} = $time;
+ }
+
+ elsif ($event eq "YR") {
+ do psout($SUSPEND{$addr},$time,$TID{$addr},"fetchlineto");
+ }
+
+ elsif ($event eq "CA" || $event eq "YA" ) {
+ do psout($SUSPEND{$addr},$time,$TID{$addr},"fetchlineto");
+ $SUSPEND{$addr} = $time;
+ }
+
+ elsif ($event eq "AC" || $event eq "AY" ) {
+ do psout($SUSPEND{$addr},$time,$TID{$addr},"suspendlineto");
+ $SUSPEND{$addr} = $time;
+ }
+
+ elsif ($event eq "RG") {
+ $START{$addr} = $time;
+ }
+
+ elsif ($event eq "AG") {
+ do psout($SUSPEND{$addr},$time,$TID{$addr},"suspendlineto");
+ $START{$addr} = $time;
+ }
+
+ elsif ($event eq "CG" || $event eq "YG" ) {
+ do psout($SUSPEND{$addr},$time,$TID{$addr},"fetchlineto");
+ $START{$addr} = $time;
+ } elsif ( $event eq "B*" || $event eq "*B" || $event eq "BB" ) {
+ print STDERR "Ignoring spark event $event at $time\n" if $opt_v;
+ } else {
+ print STDERR "Unexpected event $event at $time\n";
+ }
+
+ print("%% $time: $event $addr $TID{$addr}\n\n") if $opt_D;
+}
+
+# ---------------------------------------------------------------------------
+
+# Logo
+print("HE14 setfont\n");
+if ( $opt_m ) {
+ print("50 550 asciilogo\n");
+} else {
+ print("50 550 logo\n"); #
+}
+
+# Epilogue
+print("showpage\n");
+
+if ( $gtid-1 != $pmax ) {
+ if ( $pedantic ) {
+ die "Error: Calculated max no. of tasks ($gtid-1) does not agree with stated max. no. of tasks ($pmax)\n";
+ } else {
+ print STDERR "Warning: Calculated total no. of tasks ($gtid-1) does not agree with stated total no. of tasks ($pmax)\n" if $opt_v;
+ $y_scaling = $pmax/($gtid-1);
+ }
+}
+
+
+exit 0;
+
+# ---------------------------------------------------------------------------
+
+sub psout {
+ local($x1, $x2, $y, $cmd) = @_;
+ print("% ($x1,$y) -- ($x2,$y) $cmd\n") if $opt_D;
+ $x1 = int(($x1/$tmax) * ($xmax-$xmin) + $xmin);
+ $x2 = int(($x2/$tmax) * ($xmax-$xmin) + $xmin);
+ $y = int(($y/$pmax) * ($ymax-$ymin) + $ymin);
+ if ( $x1 == $x2 ) {
+ $x2 = $x1 + 1;
+ }
+
+ if ( $opt_l ) {
+ print("newpath\n");
+ print("$x1 $y moveto\n");
+ print("$x2 $y $cmd\n");
+ print("stroke\n");
+ } elsif ( $opt_O ) {
+ print "$x1 $x2 $y " .
+ ( $cmd eq "runlineto" ? "G RL\n" :
+ $cmd eq "suspendlineto" ? "R SL\n" :
+ $cmd eq "fetchlineto" ? "B FL\n" :
+ "\n% ERROR: Unknown command $cmd\n");
+
+ } else {
+ print "$x2 $y $x1 $y " .
+ ( $cmd eq "runlineto" ? "green run\n" :
+ $cmd eq "suspendlineto" ? "red suspend\n" :
+ $cmd eq "fetchlineto" ? "blue fetch\n" :
+ "\n% ERROR: Unknown command $cmd\n");
+ }
+}
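+
+# Output sketch (coordinates invented): with -O a running segment is emitted as
+#     112 347 73 G RL
+# i.e. from x=112 to x=347 on task row y=73, drawn via the RL macro in green;
+# without -O the same segment becomes
+#     347 73 112 73 green run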
+
+# -----------------------------------------------------------------------------
+
+sub get_date {
+ local ($date);
+
+ chop($date = `date`);
+ return ($date);
+}
+
+# -----------------------------------------------------------------------------
+
+sub write_prolog {
+ local ($now);
+
+ $now = do get_date();
+
+ print("%!PS-Adobe-2.0\n");
+ print("%%BoundingBox: 0 0 560 800\n");
+ print("%%Title: Per-thread Activity Profile\n");
+ print("%%Creator: qp2ap\n");
+ print("%%StartTime: $date\n");
+ print("%%CreationDate: $now\n");
+ print("%%Copyright: 1995, 1996 by Hans-Wolfgang Loidl, University of Glasgow\n");
+ print("%%EndComments\n");
+
+ print "% " . "-" x 77 . "\n";
+ print "% Tunable Parameters:\n";
+ print "% The width of a line representing a task\n";
+ print "/width $width def\n";
+  print "% Scaling factor for the y-axis (useful to enlarge)\n";
+ print "/y-scale $y_scale def\n";
+ print "% " . "-" x 77 . "\n";
+
+ print "/total-len $tmax def\n";
+ print "/show-len $xmax def\n";
+ print "/x-offset $xmin def\n";
+ print "/y-offset $ymin def\n";
+ print "% normalize is the PS version of the formula: \n" .
+ "% int(($x1/$tmax) * ($xmax-$xmin) + $xmin) \n" .
+ "% in psout.\n";
+ print "/normalize { total-len div show-len x-offset sub mul x-offset add floor } def\n";
+ print "/x-normalize { exch show-len mul total-len div exch } def\n";
+ print "/y-normalize { y-offset sub y-scale mul y-offset add } def\n";
+ print "/str-len 12 def\n";
+ print "/prt-n { cvi str-len string cvs \n" .
+ " dup stringwidth pop \n" .
+ " currentpoint pop 780 gt { 10 sub } { 2 div } ifelse \n" .
+ " neg 0 rmoveto \n" .
+ " show } def \n" .
+ " % print top-of-stack integer centered at the current point\n";
+ # print "/prt-n { cvi str-len string cvs \n" .
+ # " dup stringwidth pop 2 div neg 0 rmoveto \n" .
+ # " show } def \n" .
+ # " % print top-of-stack integer centered at the current point\n";
+
+ if ( $opt_l ) {
+ print ("/runlineto {1.5 setlinewidth lineto} def\n");
+ print ("/suspendlineto {0.5 setlinewidth lineto} def\n");
+ print ("/fetchlineto {0.2 setlinewidth lineto} def\n");
+ } else {
+ if ( $opt_m ) {
+ if ( $opt_O ) {
+ print "/R { 0 } def\n";
+ print "/G { 0.5 } def\n";
+ print "/B { 0.2 } def\n";
+ } else {
+ print "/red { 0 } def\n";
+ print "/green { 0.5 } def\n";
+ print "/blue { 0.2 } def\n";
+ }
+ print "/set-bg { setgray } def\n";
+ } else {
+ if ( $opt_O ) {
+ print "/R { 0.8 0 0 } def\n";
+ print "/G { 0 0.9 0.1 } def\n";
+ print "/B { 0 0.1 0.9 } def\n";
+ print "/set-bg { setrgbcolor } def\n";
+ } else {
+ print "/red { 0.8 0 0 } def\n";
+ print "/green { 0 0.9 0.1 } def\n";
+ print "/blue { 0 0.1 0.9 } def\n";
+ print "/set-bg { setrgbcolor } def\n";
+ }
+ }
+
+ if ( $opt_O ) {
+ print "% RL: runlineto; draws a horizontal line in given color\n";
+ print "% Operands: x-from x-to y color\n";
+ print "/RL { set-bg % set color \n" .
+ " newpath y-normalize % mangle y val\n" .
+ " 2 index 1 index moveto width setlinewidth \n" .
+ " lineto pop stroke} def\n";
+ print "% SL: suspendlineto; draws a horizontal line in given color (thinner)\n";
+ print "% Operands: x-from x-to y color\n";
+ print "/SL { set-bg % set color \n" .
+ " newpath y-normalize % mangle y val\n" .
+ " 2 index 1 index moveto width 2 div setlinewidth \n" .
+ " lineto pop stroke} def\n";
+ print "% FL: fetchlineto; draws a horizontal line in given color (thinner)\n";
+ print "% Operands: x-from x-to y color\n";
+ print "/FL { set-bg % set color \n" .
+ " newpath y-normalize % mangle y val\n" .
+ " 2 index 1 index moveto width " .
+ ( $opt_m ? " 4 " : " 2 ") .
+ " div setlinewidth \n" .
+ " lineto pop stroke} def\n";
+ } else {
+ print "/run { set-bg newpath 50 sub y-scale mul 50 add moveto width " .
+ "setlinewidth 50 sub y-scale mul 50 add lineto stroke} def\n";
+ print "/suspend { set-bg newpath 50 sub y-scale mul 50 add moveto width " .
+ "2 div setlinewidth 50 sub y-scale mul 50 add lineto stroke} def\n";
+ print "/fetch { set-bg newpath 50 sub y-scale mul 50 add moveto width " .
+ ( $opt_m ? " 4 " : " 2 ") .
+ "div setlinewidth 50 sub y-scale mul 50 add lineto stroke} def\n";
+ #print ("/run { newpath moveto 1.5 setlinewidth lineto stroke} def\n");
+ #print ("/suspend { newpath moveto 0.5 setlinewidth lineto stroke} def\n");
+ }
+ }
+
+ print "/printText { 0 0 moveto (GrAnSim) show } def\n";
+ print "/asciilogo { 5 sub moveto HB16 setfont (GrAnSim) show } def\n";
+ if ( $opt_m ) {
+ print "/logo { asciilogo } def\n";
+ } else {
+ print "/logo { gsave \n" .
+ " translate \n" .
+ " .95 -.05 0\n" .
+ " { dup 1 exch sub 0 exch setrgbcolor printText 1 -.5 translate } for \n" .
+ " 1 0 0 setrgbcolor printText\n" .
+ " grestore} def\n";
+ }
+ print "% For debugging PS uncomment this line and add the file behandler.ps\n";
+ print "% $brkpage begin printonly endprint \n";
+
+ print("/HE10 /Helvetica findfont 10 scalefont def\n");
+ print("/HE12 /Helvetica findfont 12 scalefont def\n");
+ print("/HE14 /Helvetica findfont 14 scalefont def\n");
+ print("/HB16 /Helvetica-Bold findfont 16 scalefont def\n");
+ print "% " . "-" x 77 . "\n";
+ print("newpath\n");
+
+ print("-90 rotate\n");
+ print("-785 30 translate\n");
+ print("0 8.000000 moveto\n");
+ print("0 525.000000 760.000000 525.000000 8.000000 arcto\n");
+ print("4 {pop} repeat\n");
+ print("760.000000 525.000000 760.000000 0 8.000000 arcto\n");
+ print("4 {pop} repeat\n");
+ print("760.000000 0 0 0 8.000000 arcto\n");
+ print("4 {pop} repeat\n");
+ print("0 0 0 525.000000 8.000000 arcto\n");
+ print("4 {pop} repeat\n");
+ print("0.500000 setlinewidth\n");
+ print("stroke\n");
+ print("newpath\n");
+ print("4.000000 505.000000 moveto\n");
+ print("4.000000 521.000000 752.000000 521.000000 4.000000 arcto\n");
+ print("4 {pop} repeat\n");
+ print("752.000000 521.000000 752.000000 501.000000 4.000000 arcto\n");
+ print("4 {pop} repeat\n");
+ print("752.000000 501.000000 4.000000 501.000000 4.000000 arcto\n");
+ print("4 {pop} repeat\n");
+ print("4.000000 501.000000 4.000000 521.000000 4.000000 arcto\n");
+ print("4 {pop} repeat\n");
+ print("0.500000 setlinewidth\n");
+ print("stroke\n");
+
+ print("HE14 setfont\n");
+ print("100 505 moveto\n");
+ print("($pname ) show\n");
+
+ print("($date) dup stringwidth pop 750 exch sub 505.000000 moveto show\n");
+
+ # print "/total-len $tmax def\n";
+ print("-40 -40 translate\n");
+
+ print "% " . "-" x 77 . "\n";
+ print "% Print x-axis:\n";
+ print "/y-val $ymin def % { y-offset 40 sub 2 div y-offset add } def\n";
+ print "0.5 setlinewidth\n";
+ print "x-offset y-val moveto total-len normalize x-offset sub 0 rlineto stroke\n";
+ print "0 total-len 10 div total-len\n" .
+ " { dup normalize dup y-val moveto 0 -2 rlineto stroke % tic\n" .
+ " y-val 10 sub moveto HE10 setfont round prt-n % print label \n" .
+ " } for \n";
+ print "1 setlinewidth\n";
+ print "% " . "-" x 77 . "\n";
+
+}
+
+# -----------------------------------------------------------------------------
+
+sub print_y_axis {
+ local ($i);
+ local ($y, $smax,$majormax, $majorint);
+
+# Y-axis label
+
+ print "% " . ("-" x 75) . "\n";
+ print "% Y-Axis:\n";
+ print "% " . ("-" x 75) . "\n";
+
+ if ( $opt_m ) {
+ print "0 setgray\n";
+ } else {
+ print "0 0 0 setrgbcolor\n";
+ }
+
+ print("gsave\n");
+ print("HE12 setfont\n");
+ print("(tasks)\n");
+ print("dup stringwidth pop\n");
+ print("$ymax\n");
+ print("exch sub\n");
+ print("$labelx exch\n");
+ print("translate\n");
+ print("90 rotate\n");
+ print("0 0 moveto\n");
+ print("show\n");
+ print("grestore\n");
+
+# Scale
+
+ if ($pmax < $majorticks) {
+ $majorticks = $pmax;
+ }
+
+ print "0.5 setlinewidth\n";
+
+ print("HE12 setfont\n$scalex $ymin moveto\n$scalex $ymax lineto\n");
+ print("% Total number of tasks: $pmax\n");
+ print("% Number of ticks: $majorticks\n");
+
+ $y = $ymax; # (($pmax - $ymin)/$majorticks) * ($majorticks-$i) + $ymin;
+ print("$scalex $y moveto\n$major $y lineto\n");
+ print("$markx $y moveto\n($pmax) show\n");
+
+ $majormax = int($pmax/$majorticks)*$majorticks;
+ $smax = $majormax*(($ymax-$ymin)/$pmax)+$ymin;
+ $majorint = $majormax/$majorticks;
+
+ for($i=0; $i <= $majorticks; ++$i) {
+ $y = (($smax - $ymin)/$majorticks) * ($majorticks-$i) + $ymin;
+ $majorval = int($majorint * ($majormax/$majorint-$i));
+ print("$scalex $y moveto\n$major $y lineto\n");
+ print("$markx $y moveto\n($majorval) show\n");
+ }
+
+ # print("$xmin $ymax moveto\n10 0 rlineto\n10 0 rmoveto\n($pmax) show\n");
+ print " stroke\n";
+ print "1 setlinewidth\n";
+ print "% " . ("-" x 75) . "\n";
+}
+
+# ---------------------------------------------------------------------------
+
+sub print_verbose_message {
+
+ print "Prg Name: $pname Date: $date\n";
+ print "Input: stdin Output: stdout\n";
+}
+
+# ----------------------------------------------------------------------------
+
+sub process_options {
+
+ if ( $opt_h ) {
+ open(ME,$0) || die "Can't open myself ($0): $!\n";
+ $n = 0;
+ while (<ME>) {
+ last if $_ =~ /^$/;
+ print $_;
+ $n++;
+ }
+ close(ME);
+ exit ;
+ }
+
+ if ( $opt_s ) {
+ $y_scale = $opt_s;
+ } else {
+ $y_scale = 1;
+ }
+
+ if ( $#ARGV != 3 ) {
+ print "Usage: $0 [options] <max x value> <max y value> <prg name> <date> \n";
+ print "Use -h option to get details\n";
+ exit 1;
+ }
+
+ $tmax = $ARGV[0];
+ $pmax = $ARGV[1];
+ # GUM uses the absolute path (with '=' instead of '/') of the executed file
+ # (for PVM reasons); if you want to have the full path in the generated
+ # graph, too, eliminate the substitution below
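+ # (e.g. a hypothetical GUM name '=usr=local=bin=parfib' is shortened to 'parfib')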
+ ($pname = $ARGV[2]) =~ s/.*=//;
+ $date = $ARGV[3];
+
+ if ( $opt_w ) {
+ $width = $opt_w;
+ } else {
+ $width = 0;
+ }
+
+}
+# -----------------------------------------------------------------------------
diff --git a/ghc/utils/parallel/qp2ps.pl b/ghc/utils/parallel/qp2ps.pl
index d671cb8937..2fb090346a 100644
--- a/ghc/utils/parallel/qp2ps.pl
+++ b/ghc/utils/parallel/qp2ps.pl
@@ -1,18 +1,31 @@
#! /usr/local/bin/perl
##############################################################################
+# Time-stamp: <Wed Jul 24 1996 22:04:50 Stardate: [-31]7859.39 hwloidl>
#
-# Usage: qp2ps.pl [options] <max-x> <prg> <date>
+# Usage: qp2ps [options] <max-x> <max-y> <prg> <date>
#
# Filter that transforms a quasi-parallel profile (a .qp file) at stdin to
# a PostScript file at stdout, showing essentially the total number of running,
# runnable and blocked tasks.
#
# Options:
-# -o <file> ... write PS file to <file>
+# -o <file> ... write .ps file to <file>
# -m ... create a mono PostScript file instead of a color one.
# -O ... compress, i.e. try to minimize the size of the .ps file
# -s <str> ... print <str> in the top right corner of the generated graph
# -i <int> ... info level from 1 to 7; number of queues to display
+# -I <str> ... queues to be displayed (in the given order) with the encoding
+# 'a' ... active (running)
+# 'r' ... runnable
+# 'b' ... blocked
+# 'f' ... fetching
+# 'm' ... migrating
+# 's' ... sparks
+# (e.g. -I "arb" shows active, runnable, blocked tasks)
+# -l <int> ... length of a slice in the .ps file (default: 500)
+# small value => less memory consumption of .ps file & script
+# but slower in generating the .ps file
+# -d ... Print date instead of average parallelism
# -v ... be talkative.
# -h ... print help message (this header).
#
@@ -20,7 +33,7 @@
require "getopts.pl";
-&Getopts('hvDOmSs:i:I:');
+&Getopts('hvDCOmdl:s:i:I:H');
do process_options();
@@ -32,6 +45,8 @@ if ( $opt_v ) {
# Init
# ---------------------------------------------------------------------------
+$y_scaling = 1.0;
+
$xmin = 100;
$xmax = 790;
@@ -41,7 +56,8 @@ $markx = $scalex - 30;
$major = $scalex - 5;
$majorticks = 10;
-$pmax = 1;
+$mmax = 1;
+
$amax = 0;
$ymin = 50;
$ymax = 500;
@@ -49,64 +65,78 @@ $ymax = 500;
$active = 0;
$runnable = 0;
$blocked = 0;
-$sparks = 0;
$fetching = 0;
+$migrating = 0;
+$sparks = 0;
-$lines_per_flush = 100; # depends on the PS implementation you use
+#$lines_per_flush = 100; # depends on the PS implementation you use
-%color = ( "a", "green",
- "r", "amber",
- "b", "red",
- "f", "cyan",
- "m", "blue",
- "s", "crimson" );
+%color = ( "a", "green", # active
+ "r", "amber", # runnable
+ "b", "red", # blocked
+ "f", "cyan", # fetching
+ "m", "blue", # migrating
+ "s", "crimson" ); # sparks
# ---------------------------------------------------------------------------
do print_prolog();
$otime = -1;
-$last_x = -1;
-$last_y = -1;
-$in_seq = 0;
$time_of_second_event = 0;
+$samples = 0;
+
+$T[0] = 0;
+$G[0] = 0;
+$A[0] = 0;
+$R[0] = 0;
+$B[0] = 0;
+$Y[0] = 0;
while(<STDIN>) {
+ next if /^[^0-9]/; # ignore lines not beginning with a digit (esp. last)
chop;
($time, $event, $tid, $addr, $tid2, $addr2) = split;
$time_of_second_event = $time if $time_of_second_event == 0;
if($time != $otime) {
$tottime += $G[$samples] * ($time-$T[$samples]);
+ $otime = $time;
+ }
- if($active > $amax) {
- $amax = $active;
- }
+ if($active > $amax) {
+ $amax = $active;
+ }
- if ( $opt_D ) {
- if($G[$samples] < $amax && $A[$samples] > 0) {
- printf(stderr "%% $otime: G $G[$samples], A $A[$samples], " .
- "R $R[$samples], B $B[$samples], " .
- "Y $Y[$samples]\n");
- }
+ if ( $opt_D ) {
+ if($G[$samples] < $amax && $A[$samples] > 0) {
+ printf(stderr "%% $otime: G $G[$samples], A $A[$samples], " .
+ "R $R[$samples], B $B[$samples], " .
+ "Y $Y[$samples]\n");
}
+ }
- # Reality Check
- if($G[$samples] < 0 || $A[$samples] < 0 ||
- $R[$samples] < 0 || $B[$samples] < 0 ||
- $Y[$samples] < 0) {
- printf(stderr "Error: Impossible number of tasks at time " .
- "$T[$samples] (G $G[$samples], A $A[$samples], ".
- "R $R[$samples], B $B[$samples], Y $Y[$samples])\n");
- }
- $samples++;
- $otime = $time;
+ # Reality Check
+ if($G[$samples] < 0 || $A[$samples] < 0 ||
+ $R[$samples] < 0 || $B[$samples] < 0 ||
+ $Y[$samples] < 0) {
+ printf(stderr "Error: Impossible number of tasks at time " .
+ "$T[$samples] (G $G[$samples], A $A[$samples], ".
+ "R $R[$samples], B $B[$samples], Y $Y[$samples])\n") if $opt_v || $opt_D;
+ if ( $opt_H ) { # HACK
+ $G[$samples] = 0 if $G[$samples] < 0;
+ $A[$samples] = 0 if $A[$samples] < 0;
+ $R[$samples] = 0 if $R[$samples] < 0;
+ $B[$samples] = 0 if $B[$samples] < 0;
+ $Y[$samples] = 0 if $Y[$samples] < 0;
+ }
}
+ $samples++;
$eventfrom = substr($event,0,1);
$eventto = substr($event,1,1);
- printf(stderr "$time $event $eventfrom $eventto\n") if $opt_D;
+ printf(stderr "$time $event $eventfrom $eventto\n") if 0 && $opt_D;
if ($eventfrom eq '*') {
}
@@ -167,27 +197,84 @@ while(<STDIN>) {
$somefetching = 1;
}
- printf(stderr "%% $time: G $active, A $runnable, R $blocked, " .
- "B $sparks, C $migrating\n") if 0;
- $T[$samples] = $time;
- $G[$samples] = &queue_on("a") ? $active : 0;
- $A[$samples] = &queue_on("r") ? $runnable : 0;
- $R[$samples] = &queue_on("b") ? $blocked : 0;
- $Y[$samples] = &queue_on("f") ? $fetching : 0;
- $B[$samples] = &queue_on("s") ? $sparks : 0;
- $C[$samples] = &queue_on("m") ? $migrating : 0;
+ #printf(stderr "%% $time: G $active, A $runnable, R $blocked, " .
+ # "B $sparks, C $migrating\n") if 1;
+
+ printf(stderr "Error: Trying to write at index 0!\n") if $samples == 0;
+ $T[$samples] = $time;
+ do set_values($samples,
+ $active,$runnable,$blocked,$fetching,$sparks,$migrating);
+
+ #$G[$samples] = queue_on_a ? $active : 0;
+ #$A[$samples] = queue_on_r ? $runnable : 0;
+ #$R[$samples] = queue_on_b ? $blocked : 0;
+ #$Y[$samples] = queue_on_f ? $fetching : 0;
+ #$B[$samples] = queue_on_s ? $sparks : 0;
+ #$C[$samples] = queue_on_m ? $migrating : 0;
$all = $G[$samples] + $A[$samples] + $R[$samples] + $Y[$samples] +
$B[$samples] + $C[$samples] ;
- if($all > $pmax) {
- $pmax = $all;
+ if($all > $mmax) {
+ $mmax = $all;
+ }
+
+ if ( 0 ) {
+ print STDERR "%% $time: (act,runnable,blocked,fetch,mig,sp) = " .
+ "($active, $runnable, $blocked, $fetching, $migrating, $sparks)".
+ " max = $all\n" ;
+ }
+
+ #print STDERR "Sparks @ $time: $sparks \tAll: $all \tMMax: $mmax\n" if $opt_D;
+
+ if ( $samples >= $slice_width ) {
+ do flush_queues();
+ $samples = 0;
}
+
+} # <STDIN>
+
+do flush_queues();
+print "%% End\n" if $opt_C;
+
+# For debugging only
+if ($opt_D) {
+ printf(stderr "Queue values after last event: " .
+ "$T[$samples] (G $G[$samples], A $A[$samples], ".
+ "R $R[$samples], B $B[$samples], Y $Y[$samples])\n");
}
if($time != $tmax) {
- die "Error: Calculated time ($time) does not agree with stated max. time ($tmax)\n";
+ if ( $pedantic ) {
+ die "Error: Calculated time ($time) does not agree with stated max. time ($tmax)\n";
+ } else { #
+ print STDERR "Warning: Calculated time ($time) does not agree with stated max. time ($tmax)\n" if $opt_v;
+ }
+}
+
+# HACK warning:
+# The real max-y value ($mmax) might differ from the one that is the input
+# to this script ($pmax). If so, we post-process the generated ps-file
+# and place an appropriate scaling fct into the header of the ps-file.
+# This is done by yet another perl-script:
+# ps-scale-y <y-scaling-factor> <ps-file>
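+# (e.g. for a hypothetical run with $pmax/$mmax == 0.75:  ps-scale-y 0.75 foo.ps)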
+
+if($pmax != $mmax) {
+ if ( $pedantic ) {
+ die "Error: Calculated max no. of tasks ($mmax) does not agree with stated max. no. of tasks ($pmax)\n";
+ } else {
+ print STDERR "Warning: Calculated max no. of tasks ($mmax) does not agree with stated max. no. of tasks ($pmax)\n" if $opt_v;
+ $y_scaling = $pmax/$mmax; #((float) $pmax)/((float) $mmax);
+ }
+}
+
+print "% " . ("-" x 75) . "\n";
+
+if ( $opt_m ) {
+ print "0 setgray\n";
+} else {
+ print "0 0 0 setrgbcolor\n";
}
# Print optional str
@@ -195,26 +282,32 @@ if($time != $tmax) {
print("HB16 setfont ($opt_s) dup stringwidth pop 790 exch sub 500 moveto show\n");
}
+ print("unscale-y\n");
+
# Average Parallelism
if($time > 0) {
- if ( 0 ) { # HACK warning; is this *always* correct -- HWL
+ if ( $opt_S ) { # HACK warning; is this *always* correct -- HWL
$avg = ($tottime-$time_of_second_event)/($time-$time_of_second_event);
} else {
$avg = $tottime/$time;
}
- $avgs=sprintf("Average Parallelism = %0.1f\n",$avg);
- print("HE14 setfont ($avgs) dup stringwidth pop 790 exch sub 525 moveto show\n");
+ if ( $opt_d ) { # Print date instead of average parallelism
+ print("HE14 setfont ($date) dup stringwidth pop 790 exch sub 515 moveto show\n");
+ } else {
+ $avgs=sprintf("Average Parallelism = %0.1f\n",$avg);
+ print("HE14 setfont ($avgs) dup stringwidth pop 790 exch sub 515 moveto show\n");
+ }
$rt_str=sprintf("Runtime = %0.0f\n",$tmax);
- print("HE14 setfont ($rt_str) dup stringwidth pop 790 exch sub 30 moveto show\n");
+ print("HE14 setfont ($rt_str) dup stringwidth pop 790 exch sub 20 moveto show\n");
}
+# do print_y_axis();
+
# -----------------------------------------------------------------------------
# Draw axes lines etc
# -----------------------------------------------------------------------------
-do print_y_axis();
-
-# if ( ! $opt_S ) {
+if ( ! $opt_S ) {
# Draw dashed line for orientation (startup time) -- HWL
@@ -232,7 +325,7 @@ if ( $draw_lines ) {
# and another one at the second event -- HWL
-print STDERR "Time of second event is: $time_of_second_event" if $opt_D;
+print STDERR "Time of second event is: $time_of_second_event" if 0 && $opt_D;
if ( $draw_lines ) {
local($x, $y);
@@ -249,52 +342,71 @@ if ( $draw_lines ) {
"grestore\n";
}
-# }
-
-# -----------------------------------------------------------------------------
-# Draw the different kinds of tasks
-# -----------------------------------------------------------------------------
-
-$rshow = reverse($show);
-print STDERR "\nReversed info-mask is : $rshow" if $opt_D;
-print STDERR "\nMaximal y value is $pmax" if $opt_D;
-for ($j=0; $j<length($rshow); $j++) {
- $x = substr($rshow,$j,1);
- print STDERR "Queue = $x i.e. " . ($color{$x}) . "\n" if $opt_D;
- print("$xmin $ymin moveto\n");
- for($i=1; $i <= $samples; $i++) {
- do psout($T[$i],&count($x,$i));
- if ($i % $lines_per_flush == 0) {
- print($color{$x} . " flush-it\n");
- }
- }
- # print("$xmax $ymin L\n");
-
- if ( $opt_m ) {
- print "closepath " . ($color{$x}) . " setgray fill\n";
- } else {
- print "closepath " . ($color{$x}) . " setrgbcolor fill\n";
- }
}
# -----------------------------------------------------------------------------
-
# Logo
print("HE14 setfont\n");
-if ( $opt_m ) {
- print("50 530 asciilogo\n");
+if ($opt_m) {
+ print("50 520 asciilogo\n");
} else {
- print("50 530 logo\n");
+ print("50 520 logo\n");
}
# Epilogue
print("showpage\n");
-exit 0;
+if ( $y_scaling != 1.0 ) {
+ print "%% y_scaling: $y_scaling\t max: $mmax\n";
+}
+
+exit 0 ;
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# -----------------------------------------------------------------------------
+# Draw the current slice of the overall graph.
+# This routine is called if a slice of data is full (i.e. $T[0..$samples],
+# $G[0..$slice_width] etc with $samples==$slice_width contain data from the
+# input file) or if the end of the input has been reached (i.e. $samples<=
+# $slice_width). Note that the last value of the current slice is stored as
+# the first value for the next slice.
+# -----------------------------------------------------------------------------
+
+sub flush_queues {
+ local ($x_norm, $y_norm);
+ local ($index);
+ local ($last_x, $last_y, $in_seq) = (-1, -1, 0);
+ local ($foo_x, $foo_y);
+
+ if ( $samples == 0 ) { return ; }
+
+ # print "% First sample: T $T[0] (G $G[0], A $A[0], ".
+ # " R $R[0], B $B[0], Y $Y[0])\n" if $opt_C;
+
+ $rshow = reverse($show);
+ print STDERR "\nReversed info-mask is : $rshow" if 0 && $opt_D;
+ print STDERR "\nMaximal y value is $pmax" if 0 && $opt_D;
+ for ($j=0; $j<length($rshow); $j++) {
+ $q = substr($rshow,$j,1);
+ # print "% Queue = $q i.e. " . ($color{$q}) . " counts at first sample: " . &count($q,0) ."\n" if $opt_C;
+ do init_psout($q, $T[0], &count($q,0));
+ for($i=1; $i <= $samples; $i++) {
+ do psout($T[$i],&count($q,$i));
+ }
+ print $color{$q} . " F\n";
+ ($foo_x, $foo_y) = &normalize($T[$samples],&count($q,$samples));
+ print "%% Last " . ($color{$q}) . " is " . &get_queue_val($q,$samples) ." (" . $T[$samples] . ", " . &count($q,$samples) . ") -> ($foo_x,$foo_y)\n" if $opt_C;
+ # print($color{$q} . " flush-it\n");
+ # print("$xmax $ymin L\n");
+ }
+ do wrap($samples);
+
+ #print "% Last sample T $T[$samples] (G $G[$samples], A $A[$samples], ".
+ # " R $R[$samples], B $B[$samples], Y $Y[$samples])\n" if $opt_C;
+}
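+
+# Wrap-around example (hypothetical numbers): with $slice_width == 3,
+# flush_queues is called as soon as $samples reaches 3; it draws the samples at
+# indices 1..3 and wrap(3) then copies the values at index 3 back to index 0,
+# which become the first sample of the next slice.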
+
+# -----------------------------------------------------------------------------
# Scale the (x,y) point (x is time in cycles, y is no. of tasks) s.t. the
# x-(time-) axis fits between $xmin and $xmax (range for .ps graph).
# In case of optimization ($opt_O):
@@ -305,15 +417,42 @@ exit 0;
# sequence!).
# -----------------------------------------------------------------------------
-sub psout {
+sub normalize {
local($x, $y ) = @_;
+ local($x_norm, $y_norm );
+
if ( $opt_S ) {
- $x = int(( ($x-$time_of_second_event)/($tmax-$time_of_second_event)) * ($xmax-$xmin) + $xmin);
+ $x_norm = int(( ($x-$time_of_second_event)/($tmax-$time_of_second_event)) * ($xmax-$xmin) + $xmin);
} else {
- $x = int(($x/$tmax) * ($xmax-$xmin) + $xmin);
+ $x_norm = int(($x/$tmax) * ($xmax-$xmin) + $xmin);
}
- $y = int(($y/$pmax) * ($ymax-$ymin) + $ymin);
+ $y_norm = int(($y/$pmax) * ($ymax-$ymin) + $ymin);
+
+ return (($x_norm, $y_norm));
+}
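+
+# A worked example (assuming $opt_S is not set and that $tmax=1000, $pmax=32
+# were given on the command line; $xmin=100, $xmax=790, $ymin=50, $ymax=500 are
+# the defaults above):
+#   &normalize(500,16)  returns  (445,275)
+# since int((500/1000)*690+100) == 445 and int((16/32)*450+50) == 275.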
+
+# -----------------------------------------------------------------------------
+
+sub init_psout {
+ local ($q, $x, $y) = @_;
+ local ($x_norm, $y_norm);
+ ($last_x, $last_y, $in_seq) = (-1, -1, 0);
+ ($x_norm, $y_norm) = &normalize($T[0],&count($q,0));
+ $last_x = $x_norm;
+ $last_y = $y_norm;
+ print "%% Begin " . ($color{$q}) . " (" . $T[0] . ", " . &count($q,0) . ") -> ($x_norm,$y_norm)\n" if $opt_C;
+ print $x_norm, " ", $y_norm, " M\n";
+
+}
+
+# ----------------------------------------------------------------------------
+
+sub psout {
+ local($x_in, $y_in ) = @_;
+ local($x, $y );
+
+ ($x, $y) = &normalize($x_in, $y_in);
die "Error in psout: Neg x coordinate\n" if ($x < 0) ;
if ( $opt_O ) {
@@ -346,47 +485,99 @@ sub queue_on {
# -----------------------------------------------------------------------------
-sub count{
+sub count {
local ($queue,$index) = @_;
local ($res);
$where = &queue_on($queue);
- $res = ((&queue_on("a") && (&queue_on("a")<=$where)) ? $G[$index] : 0) +
- ((&queue_on("r") && (&queue_on("r")<=$where)) ? $A[$index] : 0) +
- ((&queue_on("b") && (&queue_on("b")<=$where)) ? $R[$index] : 0) +
- ((&queue_on("f") && (&queue_on("f")<=$where)) ? $Y[$index] : 0) +
- ((&queue_on("m") && (&queue_on("m")<=$where)) ? $B[$index] : 0) +
- ((&queue_on("s") && (&queue_on("s")<=$where)) ? $C[$index] : 0);
+ $res = (($queue_on_a && ($queue_on_a<=$where)) ? $G[$index] : 0) +
+ (($queue_on_r && ($queue_on_r<=$where)) ? $A[$index] : 0) +
+ (($queue_on_b && ($queue_on_b<=$where)) ? $R[$index] : 0) +
+ (($queue_on_f && ($queue_on_f<=$where)) ? $Y[$index] : 0) +
+ (($queue_on_m && ($queue_on_m<=$where)) ? $C[$index] : 0) +
+ (($queue_on_s && ($queue_on_s<=$where)) ? $B[$index] : 0);
return $res;
}
# -----------------------------------------------------------------------------
+sub set_values {
+ local ($samples,
+ $active,$runnable,$blocked,$fetching,$sparks,$migrating) = @_;
+
+ $G[$samples] = $queue_on_a ? $active : 0;
+ $A[$samples] = $queue_on_r ? $runnable : 0;
+ $R[$samples] = $queue_on_b ? $blocked : 0;
+ $Y[$samples] = $queue_on_f ? $fetching : 0;
+ $B[$samples] = $queue_on_s ? $sparks : 0;
+ $C[$samples] = $queue_on_m ? $migrating : 0;
+}
+
+# -----------------------------------------------------------------------------
+
+sub set_queue_val {
+ local ($queue,$index,$val) = @_;
+
+ if ( $queue eq "a" ) { $G[$index] = $val; }
+ elsif ( $queue eq "r" ) { $A[$index] = $val; }
+ elsif ( $queue eq "b" ) { $R[$index] = $val; }
+ elsif ( $queue eq "f" ) { $Y[$index] = $val; }
+ elsif ( $queue eq "m" ) { $C[$index] = $val; }
+ elsif ( $queue eq "s" ) { $B[$index] = $val; }
+}
+
+# -----------------------------------------------------------------------------
+
+sub wrap { # used in flush_queues at the end of a slice
+ local ($index) = @_;
+
+ $T[0] = $T[$index];
+
+ $G[0] = $G[$index];
+ $A[0] = $A[$index];
+ $R[0] = $R[$index];
+ $Y[0] = $Y[$index];
+ $B[0] = $B[$index];
+ $C[0] = $C[$index];
+}
+
+# -----------------------------------------------------------------------------
+
+sub get_queue_val {
+ local ($queue,$index) = @_;
+
+ if ( $queue eq "a" ) { return $G[$index]; }
+ elsif ( $queue eq "r" ) { return $A[$index]; }
+ elsif ( $queue eq "b" ) { return $R[$index]; }
+ elsif ( $queue eq "f" ) { return $Y[$index]; }
+ elsif ( $queue eq "m" ) { return $C[$index]; }
+ elsif ( $queue eq "s" ) { return $B[$index]; }
+}
+
+# -----------------------------------------------------------------------------
+
sub get_date {
local ($date);
- open (DATE,"date |") || die ("$!");
- while (<DATE>) {
- $date = $_;
- }
- close (DATE);
-
+ chop($date = `date`);
return ($date);
}
# -----------------------------------------------------------------------------
sub print_prolog {
- local ($date);
+ local ($now);
- $date = do get_date();
+ $now = do get_date();
print("%!PS-Adobe-2.0\n");
print("%%BoundingBox: 0 0 560 800\n");
print("%%Title: Activity Profile\n");
- print("%%Creator: qp2ps.pl\n");
- print("%%CreationDate: $date\n");
+ print("%%Creator: qp2ps\n");
+ print("%%StartTime: $date\n");
+ print("%%CreationDate: $now\n");
+ print("%%Copyright: 1995, 1996 by Hans-Wolfgang Loidl, University of Glasgow\n");
print("%%EndComments\n");
#print ("/greenlineto {1.0 setlinewidth lineto} def\n");
#print ("/amberlineto {0.5 setlinewidth lineto} def\n");
@@ -396,8 +587,8 @@ sub print_prolog {
#print ("/R {newpath moveto redlineto stroke} def\n");
if ( $opt_m ) {
- print "/red { 0.5 } def\n";
- print "/green { 0 } def\n";
+ print "/red { 0 } def\n";
+ print "/green { 0.5 } def\n";
print "/blue { 0.7 } def\n";
print "/crimson { 0.8 } def\n";
print "/amber { 0.9 } def\n";
@@ -434,6 +625,31 @@ sub print_prolog {
print "/cmpy {exch pop 3 2 roll pop eq} def % compare y-coors of 2 points\n";
print "/cmp {2 index eq {exch pop eq} % compare 2 points\n";
print " {pop pop pop false} ifelse } def\n";
+
+ # Hook for scaling just the graph and y-axis
+ print "% " . "-" x 77 . "\n";
+ print "/scale-y { } def\n";
+ print "/unscale-y { } def\n";
+
+ print "% " . "-" x 77 . "\n";
+ print "/str-len 12 def\n";
+ print "/prt-n { cvi str-len string cvs \n" .
+ " dup stringwidth pop \n" .
+ " currentpoint pop 780 gt { 10 sub } { 2 div } ifelse \n" .
+ " neg 0 rmoveto \n" .
+ " show } def \n" .
+ " % print top-of-stack integer centered at the current point\n";
+ # NB: These PostScript functions must correspond to the Perl fct `normalize'
+ # Currently normalize defines the following trafo on (x,y) values:
+ # $x_norm = int(($x/$tmax) * ($xmax-$xmin) + $xmin);
+ # $y_norm = int(($y/$pmax) * ($ymax-$ymin) + $ymin);
+
+ print "/total-len $tmax def\n";
+ print "/show-len $xmax def\n";
+ print "/x-offset $xmin def\n";
+ print "/y-offset $ymin def\n";
+ print "/normalize { total-len div show-len x-offset sub mul x-offset add floor } def\n";
+ print "% " . "-" x 77 . "\n";
print "%/L { lineto } def\n";
print "%/L {2 copy pop 1 sub currentpoint exch pop lineto lineto} def\n";
print "/L {2 copy currentpoint cmpx not\n";
@@ -442,15 +658,17 @@ sub print_prolog {
print " {pop pop} \n";
print " {lineto} ifelse\n";
print "} def\n";
- print "/flush-it { % draw a segment of the overall area; Arg: color\n";
- print " currentpoint \n";
- print " 1 index 50 lineto closepath\n";
+ print "/F { % flush a segment of the overall area; Arg: color\n";
+ print " currentpoint pop $ymin lineto closepath\n";
if ( $opt_m ) {
- print " 3 2 roll setgray fill \n";
+ print " setgray fill \n";
} else {
- print " 5 2 roll setrgbcolor fill \n";
+ print " setrgbcolor fill \n";
}
- print " 1 index 50 moveto lineto \n";
+ print "} def\n";
+ print "/M { % Start drawing a slice (vert. line and moveto startpoint)\n";
+ print " % Arg: x y\n";
+ print " newpath 1 index $ymin moveto lineto\n";
print "} def\n";
print "% For debugging PS uncomment this line and add the file behandler.ps\n";
print "% $brkpage begin printonly endprint \n";
@@ -520,31 +738,31 @@ sub print_prolog {
$x_now = $x_begin;
- if ( &queue_on("a") ) {
+ if ( $queue_on_a ) {
do print_box_and_label($x_now,$y_label,"green","running");
}
- if ( &queue_on("r") ) {
+ if ( $queue_on_r ) {
$x_now += $step;
do print_box_and_label($x_now,$y_label,"amber","runnable");
}
- if ( &queue_on("f") ) {
+ if ( $queue_on_f ) {
$x_now += $step;
do print_box_and_label($x_now,$y_label,"cyan","fetching");
}
- if ( &queue_on("b") ) {
+ if ( $queue_on_b ) {
$x_now += $step;
do print_box_and_label($x_now,$y_label,"red","blocked");
}
- if ( &queue_on("m") ) {
+ if ( $queue_on_m ) {
$x_now += $step;
do print_box_and_label($x_now,$y_label,"blue","migrating");
}
- if ( &queue_on("s") ) {
+ if ( $queue_on_s ) {
$x_now += $step;
do print_box_and_label($x_now,$y_label,"crimson","sparked");
}
@@ -554,7 +772,21 @@ sub print_prolog {
#print("680 10 moveto\n");
#print("(RT: $tmax) show\n");
- print("-40 -20 translate\n");
+ print("-40 -10 translate\n");
+
+ do print_x_axis();
+
+ print("$xmin $ymin moveto\n");
+ if ( $opt_m ) {
+ print "0 setgray\n";
+ } else {
+ print "0 0 0 setrgbcolor\n";
+ }
+
+ do print_y_axis();
+
+ print("scale-y\n");
+
}
# -----------------------------------------------------------------------------
@@ -585,11 +817,36 @@ sub print_box_and_label {
# -----------------------------------------------------------------------------
+sub print_x_axis {
+
+ print "% " . "-" x 77 . "\n";
+ print "% X-Axis:\n";
+ print "/y-val $ymin def\n";
+ print "0.5 setlinewidth\n";
+ print "x-offset y-val moveto total-len normalize x-offset sub 0 rlineto stroke\n";
+ print "0 total-len 10 div total-len\n" .
+ " { dup normalize dup y-val moveto 0 -2 rlineto stroke % tic\n" .
+ " y-val 10 sub moveto HE10 setfont round prt-n % print label \n" .
+ " } for \n";
+ print "1 setlinewidth\n";
+ print "% End X-Axis:\n";
+ print "% " . "-" x 77 . "\n";
+}
+
+# -----------------------------------------------------------------------------
+
sub print_y_axis {
local ($i);
+ local ($y, $smax,$majormax, $majorint);
# Y-axis label
+ print "% " . ("-" x 75) . "\n";
+ print "% Y-Axis:\n";
+ print "% " . ("-" x 75) . "\n";
+
+ print("%scale-y % y-axis outside scaled area if ps-scale-y rebuilds it!\n");
+
print("gsave\n");
print("HE12 setfont\n");
print("(tasks)\n");
@@ -605,23 +862,25 @@ sub print_y_axis {
# Scale
- if ( $opt_m ) {
- print "0 setgray\n";
- } else {
- print "0 0 0 setrgbcolor\n";
+ if ($pmax < $majorticks) {
+ $majorticks = $pmax;
}
print("HE12 setfont\n$scalex $ymin moveto\n$scalex $ymax lineto\n");
+ print("% Max number of tasks: $pmax\n");
+ print("% Number of ticks: $majorticks\n");
- if ($pmax < $majorticks) {
- $majorticks = $pmax;
- }
+ print "0.5 setlinewidth\n";
+
+ $y = $ymax; # (($pmax - $ymin)/$majorticks) * ($majorticks-$i) + $ymin;
+ print("$scalex $y moveto\n$major $y lineto\n");
+ print("$markx $y moveto\n($pmax) show\n");
$majormax = int($pmax/$majorticks)*$majorticks;
$smax = $majormax*(($ymax-$ymin)/$pmax)+$ymin;
$majorint = $majormax/$majorticks;
- for($i=0; $i <= $majorticks; ++$i) {
+ for($i=1; $i <= $majorticks; ++$i) {
$y = (($smax - $ymin)/$majorticks) * ($majorticks-$i) + $ymin;
$majorval = int($majorint * ($majormax/$majorint-$i));
print("$scalex $y moveto\n$major $y lineto\n");
@@ -630,14 +889,32 @@ sub print_y_axis {
# print("$xmin $ymax moveto\n10 0 rlineto\n10 0 rmoveto\n($pmax) show\n");
print " stroke\n";
+ print "1 setlinewidth\n";
+ print "%unscale-y\n";
+ print "% End Y-Axis.\n";
+ print "% " . ("-" x 75) . "\n";
}
# -----------------------------------------------------------------------------
sub print_verbose_message {
- print "Prg Name: $pname Date: $date Info-str: $show\n";
- print "Input: stdin Output: stdout\n";
+ print STDERR "Prg Name: $pname \nDate: $date \nInfo-str: $show\n";
+ print STDERR "Input: stdin Output: stdout\n";
+ print STDERR "The following queues are turned on: " .
+ ( $queue_on_a ? "active, " : "") .
+ ( $queue_on_r ? "runnable, " : "") .
+ ( $queue_on_b ? "blocked, " : "") .
+ ( $queue_on_f ? "fetching, " : "") .
+ ( $queue_on_m ? "migrating, " : "") .
+ ( $queue_on_s ? "sparks" : "") .
+ "\n";
+ if ( $opt_C ) {
+ print STDERR "Inserting check code into .ps file (for check-ps3 script)\n";
+ }
+ if ( $opt_D ) {
+ print STDERR "Debugging is turned ON!\n";
+ }
}
# ----------------------------------------------------------------------------
@@ -656,23 +933,22 @@ sub process_options {
exit ;
}
- if ( $#ARGV != 2 ) {
- print "Usage: $0 [options] <max y value> <prg name> <date> \n";
+ if ( $#ARGV != 3 ) {
+ print "Usage: $0 [options] <max x value> <max y value> <prg name> <date> \n";
print "Use -h option to get details\n";
exit 1;
}
$tmax = $ARGV[0];
- $pname = $ARGV[1];
- $date = $ARGV[2];
+ $pmax = $ARGV[1];
+ # GUM uses the absolute path (with '=' instead of '/') of the executed file
+ # (for PVM reasons); if you want to have the full path in the generated
+ # graph, too, eliminate the substitution below
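+ # (e.g. a hypothetical GUM name '=usr=local=bin=parfib' is shortened to 'parfib')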
+ ($pname = $ARGV[2]) =~ s/.*=//;
+ $date = $ARGV[3];
$show = "armfb";
-
- if ( $opt_S ) {
- $draw_lines = 1;
- } else {
- $draw_lines = 0;
- }
+ $draw_lines = 0;
if ( $opt_i ) {
$show = "a" if info_level == 1;
@@ -691,123 +967,22 @@ sub process_options {
$verbose = 1;
}
-# if ($#ARGV == 0) {
-# printf(stderr "usage: qp2ps.pl runtime [prog [date]]\n");
-# exit 1;
-# }
-}
-
-# -----------------------------------------------------------------------------
-# Old way of drawing areas
-# -----------------------------------------------------------------------------
-
-exit 0;
-
-# Blocked Tasks
-if ($someblocked && ($info_level >= 3)) {
- print("$xmin $ymin moveto\n");
- for($i=($opt_S ? 2 : 1); $i <= $samples; $i++) {
- do psout($T[$i],$G[$i]+$A[$i]+$C[$i]+$B[$i]+$Y[$i]+$R[$i]);
- if ($i % $lines_per_flush == 0) {
- print("red flush-it\n");
- }
- }
- # print("$xmax $ymin L\n");
-
- if ( $opt_m ) {
- print "closepath red setgray fill\n";
- } else {
- print "closepath red setrgbcolor fill\n";
- }
-}
-
-# Fetching Tasks
-if ($somefetching && ($info_level >= 4)) {
- print("$xmin $ymin moveto\n");
- for($i=($opt_S ? 2 : 1); $i <= $samples; $i++) {
- do psout($T[$i],$G[$i]+$A[$i]+$C[$i]+$B[$i]+$Y[$i]);
- if ($i % $lines_per_flush == 0) {
- print("cyan flush-it\n");
- }
- }
- # print("$xmax $ymin L\n");
-
- if ( $opt_m ) {
- print "closepath cyan setgray fill\n";
- } else {
- print "closepath cyan setrgbcolor fill\n";
- }
-}
-
-# Sparks
-if ($somesparks && ($info_level >= 6)) {
- print("$xmin $ymin moveto\n");
- for($i=($opt_S ? 2 : 1); $i <= $samples; $i++) {
- do psout($T[$i],$G[$i]+$A[$i]+$C[$i]+$B[$i]);
- if ($i % $lines_per_flush == 0) {
- print("crimson flush-it\n");
- }
- }
- # print("$xmax $ymin L\n");
-
- if ( $opt_m ) {
- print "closepath crimson setgray fill\n";
- } else {
- print "closepath crimson setrgbcolor fill\n";
- }
-}
-
-# Migrating Threads
-if ($somemigratory && ($info_level >= 5)) {
- print("$xmin $ymin moveto\n");
- for($i=($opt_S ? 2 : 1); $i <= $samples; $i++) {
- do psout($T[$i],$G[$i]+$A[$i]+$C[$i]);
- if ($i % $lines_per_flush == 0) {
- print("blue flush-it\n");
- }
- }
- # print("$xmax $ymin L\n");
- # print("closepath\ngsave\n0.9 setgray\nfill\ngrestore\nstroke\n");
- if ( $opt_m ) {
- print "closepath blue setgray fill\n";
+ if ( $opt_l ) {
+ $slice_width = $opt_l;
} else {
- print "closepath blue setrgbcolor fill\n";
+ $slice_width = 500;
}
-}
-# Runnable Tasks
-if($somerunnable && ($info_level >= 2)) {
- print("$xmin $ymin moveto\n");
- for($i=($opt_S ? 2 : 1); $i <= $samples; $i++) {
- do psout($T[$i],$G[$i]+$A[$i]);
- if ($i % $lines_per_flush == 0) {
- print("amber flush-it\n");
- }
- }
- # print("$xmax $ymin L\n");
- # print("closepath\ngsave\n0.9 setgray\nfill\ngrestore\nstroke\n");
- if ( $opt_m ) {
- print "closepath amber setgray fill\n";
- } else {
- print "closepath amber setrgbcolor fill\n";
- }
-}
+ $queue_on_a = &queue_on("a");
+ $queue_on_r = &queue_on("r");
+ $queue_on_b = &queue_on("b");
+ $queue_on_f = &queue_on("f");
+ $queue_on_s = &queue_on("s");
+ $queue_on_m = &queue_on("m");
-# Active Tasks
-if ($info_level >= 1) {
- print("$xmin $ymin moveto\n");
- for($i=($opt_S ? 2 : 1); $i <= $samples; $i++) {
- do psout($T[$i],$G[$i]);
- if ($i % $lines_per_flush == 0) {
- print("green flush-it\n");
- }
- }
- # print("$xmax $ymin L\n");
- # print("closepath\ngsave\n0.5 setgray\nfill\ngrestore\nstroke\n");
- if ( $opt_m ) {
- print "closepath green setgray fill\n";
- } else {
- print "closepath green setrgbcolor fill\n";
- }
+# if ($#ARGV == 0) {
+# printf(stderr "usage: qp2ps.pl runtime [prog [date]]\n");
+# exit 1;
+# }
}
diff --git a/ghc/utils/parallel/sn_filter.pl b/ghc/utils/parallel/sn_filter.pl
new file mode 100644
index 0000000000..4bfc2d1721
--- /dev/null
+++ b/ghc/utils/parallel/sn_filter.pl
@@ -0,0 +1,92 @@
+#!/usr/local/bin/perl
+# ############################################################################
+# Time-stamp: <Wed Jun 19 1996 12:26:21 Stardate: [-31]7682.38 hwloidl>
+#
+# Usage: sn_filter [options] <gr-file> <sn>
+#
+# Extract all events out of <gr-file> that are related to threads whose
+# spark name component is <sn>.
+#
+# Options:
+# -H ... Print header of the <gr-file>, too
+# -h ... print help message (this text)
+# -v ... be talkative
+#
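+# Example (hypothetical files): sn_filter -H parfib.gr 4 > parfib-sn4.gr
+# keeps the .gr header and all events of threads whose spark name is 4.
+#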
+# ############################################################################
+
+$gran_dir = $ENV{'GRANDIR'};
+if ( $gran_dir eq "" ) {
+ print STDERR "Warning: Env variable GRANDIR is undefined\n";
+}
+
+push(@INC, $gran_dir, $gran_dir . "/bin");
+# print STDERR "INC: " . join(':',@INC) . "\n";
+
+require "get_SN";
+require "getopts.pl";
+
+&Getopts('hvH');
+
+do process_options();
+if ( $opt_v ) { do print_verbose_message(); }
+
+# ----------------------------------------------------------------------------
+
+do get_SN($input);
+
+open (FILE,$input) || die "Can't open $input: $!\n";
+
+$in_header = 1;
+while (<FILE>) {
+ print if $in_header && $opt_H;
+ $in_header = 0 if /^\++$/;
+ next if $in_header;
+ next unless /^PE\s*\d+\s*\[\d+\]:\s*\w*\s*([0-9a-fx]+)/;
+ $id = $1;
+ # print STDERR "$id --> " . $id2sn{hex($id)} . " sn: $sn ==> " . ($sn eq $id2sn{hex($id)}) . "\n";
+ print if $sn == $id2sn{hex($id)};
+}
+
+close (FILE);
+
+exit 0;
+
+# ----------------------------------------------------------------------------
+
+sub process_options {
+
+ if ( $#ARGV != 1 ) {
+ die "Usage: sn_filter <gr-file> <sn>\n";
+ }
+
+ $input = $ARGV[0];
+ $sn = $ARGV[1];
+
+ print STDERR "File: |$file|; sn: |$sn|\n" if $opt_v;
+
+ if ( $opt_h ) {
+ open (ME,$0) || die "$!: $0";
+ while (<ME>) {
+ last if /^$/;
+ print;
+ }
+ close (ME);
+ exit 1;
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub print_verbose_message {
+
+ print "Input: $input\tOutput: stdout\tSN: $sn\n";
+ if ( $opt_H ) {
+ print "Prepending .gr header to the output.\n";
+ }
+
+}
+
+# ----------------------------------------------------------------------------
+
+
+
diff --git a/ghc/utils/parallel/stats.pl b/ghc/utils/parallel/stats.pl
new file mode 100644
index 0000000000..6cf826b5cd
--- /dev/null
+++ b/ghc/utils/parallel/stats.pl
@@ -0,0 +1,168 @@
+#!/usr/local/bin/perl
+##############################################################################
+# Time-stamp: <Sat Oct 28 1995 23:15:13 Stardate: [-31]6509.63 hwloidl>
+#
+# Usage: do ....
+#
+# Statistics package that is used in gran-extr, RTS2gran and friends.
+# Most of the routines assume a list of integers as input.
+# This package contains:
+# - corr
+# - mean_std_dev
+# - cov
+# - list_sum
+# - list_max
+# - list_min
+#
+##############################################################################
+
+# ----------------------------------------------------------------------------
+# Compute correlation of 2 vectors, having their sums precomputed.
+# Usage: do corr($n, $sum_1, @list_1, $sum_2, @list_2);
+#
+# Input: $n ... number of all elements in @list_1 as well as in @list_2
+# (i.e. $n = $#list_1+1 = $#list_2+1).
+# $sum_1 ... sum of all elements in @list_1
+# @list_1 ... list of integers; first vector
+# $sum_2 ... sum of all elements in @list_2
+# @list_2 ... list of integers; second vector
+# Output: correlation of @list_1 and @list_2
+# ----------------------------------------------------------------------------
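+# Example (hypothetical data): @xs = (1,2,3) and @ys = (2,4,6) are perfectly
+# linearly correlated, so   do corr(3, 6, @xs, 12, @ys);   returns 1.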
+
+sub corr {
+ local ($n, $sum_1, @rest) = @_;
+ local (@list_1) = splice(@rest,0,$n);
+ local ($sum_2, @list_2) = @rest;
+
+ local ($mean_1,$mean_2,$std_dev_1,$std_dev_2);
+
+ if ( $opt_D ) {
+ print "\ncorr: n=$n sum_1=$sum_1 sum_2=$sum_2\n";
+ print " list_sum of list_1=" . &list_sum(@list_1) .
+ " list_sum of list_2=" . &list_sum(@list_2) . "\n";
+ print " len of list_1=$#list_1 len of list_2=$#list_2\n";
+ }
+
+ ($mean_1, $std_dev_1) = &mean_std_dev($sum_1,@list_1);
+ ($mean_2, $std_dev_2) = &mean_std_dev($sum_2,@list_2);
+
+ if ( $opt_D ) {
+ print "corr: $mean_1, $std_dev_1; $mean_2, $std_dev_2\n";
+ }
+
+ return ( ($std_dev_1 * $std_dev_2) == 0 ?
+ 0 :
+ &cov($n, $mean_1, @list_1, $mean_2, @list_2) /
+ ( $std_dev_1 * $std_dev_2 ) );
+}
+
+# ----------------------------------------------------------------------------
+
+sub mean_std_dev {
+ local ($sum,@list) = @_;
+ local ($n, $s, $s_);
+
+ #print "\nmean_std_dev: sum is $sum ; list has length $#list";
+
+ $n = $#list+1;
+ $mean_value = $sum/$n;
+
+ $s_ = 0;
+ foreach $x (@list) {
+ $s_ += $x;
+ $s += ($mean_value - $x) ** 2;
+ }
+ if ( $sum != $s_ ) {
+ print "stat.pl: ERROR in mean_std_dev: provided sum is wrong " .
+ "(provided: $sum; computed: $s_ " .
+ ";list_sum: " . &list_sum(@list) . "\n";
+ exit (2);
+ }
+
+ return ( ($mean_value, sqrt($s / ($n - 1)) ) );
+}
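+
+# e.g. (hypothetical data): &mean_std_dev(6, (1,2,3)) returns (2, 1),
+# i.e. mean 2 and sample standard deviation 1.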
+
+# ----------------------------------------------------------------------------
+
+sub _mean_std_dev {
+ return ( &mean_std_dev(&list_sum(@_), @_) );
+}
+
+# ----------------------------------------------------------------------------
+# Compute covariance of 2 vectors, having their sums precomputed.
+# Input: $n ... number of all elements in @list_1 as well as in @list_2
+# (i.e. $n = $#list_1+1 = $#list_2+1).
+# $mean_1 ... mean value of all elements in @list_1
+# @list_1 ... list of integers; first vector
+# $mean_2 ... mean value of all elements in @list_2
+# @list_2 ... list of integers; second vector
+# Output: covariance of @list_1 and @list_2
+# ----------------------------------------------------------------------------
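+# Example (hypothetical data, means precomputed):
+#   do cov(3, 2, (1,2,3), 4, (2,4,6));   returns 2.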
+
+sub cov {
+ local ($n, $mean_1, @rest) = @_;
+ local (@list_1) = splice(@rest,0,$n);
+ local ($mean_2, @list_2) = @rest;
+
+ local ($i,$s,$s_1,$s_2);
+
+ for ($i=0; $i<$n; $i++) {
+ $s_1 += $list_1[$i];
+ $s_2 += $list_2[$i];
+ $s += ($mean_1 - $list_1[$i]) * ($mean_2 - $list_2[$i]);
+ }
+ if ( $mean_1 != ($s_1/$n) ) {
+ print "stat.pl: ERROR in cov: provided mean value is wrong " .
+ "(provided: $mean_1; computed: " . ($s_1/$n) . ")\n";
+ exit (2);
+ }
+ if ( $mean_2 != ($s_2/$n) ) {
+ print "stat.pl: ERROR in cov: provided mean value is wrong " .
+ "(provided: $mean_2; computed: " . ($s_2/$n) . ")\n";
+ exit (2);
+ }
+ return ( $s / ($n - 1) ) ;
+}
+
+# ---------------------------------------------------------------------------
+
+sub list_sum {
+ local (@list) = @_;
+ local ($sum) = (0);
+
+ foreach $x (@list) {
+ $sum += $x;
+ }
+
+ return ($sum);
+}
+
+# ----------------------------------------------------------------------------
+
+sub list_max {
+ local (@list) = @_;
+ local ($max) = shift;
+
+ foreach $x (@list) {
+ $max = $x if $x > $max;
+ }
+
+ return ($max);
+}
+
+# ----------------------------------------------------------------------------
+
+sub list_min {
+ local (@list) = @_;
+ local ($min) = shift;
+
+ foreach $x (@list) {
+ $min = $x if $x < $min;
+ }
+
+ return ($min);
+}
+
+# ----------------------------------------------------------------------------
+
+1;
diff --git a/ghc/utils/parallel/template.pl b/ghc/utils/parallel/template.pl
new file mode 100644
index 0000000000..21391d7cd0
--- /dev/null
+++ b/ghc/utils/parallel/template.pl
@@ -0,0 +1,141 @@
+#!/usr/local/bin/perl
+##############################################################################
+# Time-stamp: <Sat Oct 28 1995 23:00:47 Stardate: [-31]6509.58 hwloidl>
+#
+# Usage: do read_template(<template_file_name>,<input_file_name>);
+#
+# Read the template file <template_file_name> as defined in /dev/null.
+# Set global variables as defined in the template file.
+# This is mainly used in gran-extr and RTS2gran.
+#
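+# A minimal, purely hypothetical template file might look like:
+#   -- lines starting with -- are ignored
+#   g: $0-gran.dat
+#   k: 5
+#   v
+# i.e. the gran file name (with $0 replaced by the input name), 5 clusters,
+# and verbose mode.
+#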
+##############################################################################
+
+require "aux.pl";
+
+sub read_template {
+ local ($org_templ_file_name,$input) = @_;
+ local ($f,$templ_file_name);
+
+ # Resolve name
+ $gran_dir = $ENV{GRANDIR} ? $ENV{GRANDIR} : $ENV{HOME} ;
+ $templ_file_name = ( $org_templ_file_name eq '.' ? "TEMPL"
+ #^^^ default file name
+ : $org_templ_file_name eq ',' ? $gran_dir . "/bin/TEMPL"
+ #^^^ global master template
+ : $org_templ_file_name eq '/' ? $gran_dir . "/bin/T0"
+ #^^ template, that throws away most of the info
+ : $org_templ_file_name );
+
+ if ( $opt_v ) {
+ print "Reading template file $templ_file_name ...\n";
+ }
+
+ ($f = ($input eq "-" ? "stdin" : $input)) =~ s/.rts//;
+
+ open(TEMPLATE,"cat $templ_file_name | sed -e 's/\$0/$f/' |")
+ || die "Couldn't open file $templ_file_name";
+
+ while (<TEMPLATE>) {
+ next if /^\s*$/ || /^--/;
+ if (/^\s*G[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @exec_times = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*F[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @fetch_times = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*A[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @has = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*C[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @comm_percs = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*S[:,;.\s]+([^\n]+)$/) {
+ $list_str = $1;
+ $list_str =~ s/[\(\)\[\]]//g;
+ @sparks = split(/[,;. ]+/, $list_str);
+ } elsif (/^\s*g[:,;.\s]+([\S]+)$/) {
+ ($gran_file_name,$gran_global_file_name, $gran_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*f[:,;.\s]+([\S]+)$/) {
+ ($ft_file_name,$ft_global_file_name, $ft_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*c[:,;.\s]+([\S]+)$/) {
+ ($comm_file_name, $comm_global_file_name, $comm_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*s[:,;.\s]+([\S]+)$/) {
+ ($spark_file_name, $spark_global_file_name, $spark_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*a[:,;.\s]+([\S]+)$/) {
+ ($ha_file_name, $ha_global_file_name, $ha_local_file_name) =
+ &mk_global_local_names($1);
+ } elsif (/^\s*p[:,;.\s]+([\S]+)$/) {
+ $gp_file_name = $1;
+ # $ps_file_name = &dat2ps_name($gp_file_name);
+ } elsif (/^\s*Xcorr[:,;.\s]+([\S]+)$/) {
+ $corr_file_name = $1;
+ } elsif (/^\s*Xcumulat-rts[:,;.\s]+([\S]+)$/) {
+ $cumulat_rts_file_name = $1;
+ ($cumulat0_rts_file_name = $1) =~ s/\./0./;
+ } elsif (/^\s*Xcumulat-has[:,;.\s]+([\S]+)$/) {
+ $cumulat_has_file_name = $1;
+ } elsif (/^\s*Xcumulat-fts[:,;.\s]+([\S]+)$/) {
+ $cumulat_fts_file_name = $1;
+ } elsif (/^\s*Xcumulat-cps[:,;.\s]+([\S]+)$/) {
+ $cumulat_cps_file_name = $1;
+ } elsif (/^\s*Xclust-rts[:,;.\s]+([\S]+)$/) {
+ $clust_rts_file_name = $1;
+ } elsif (/^\s*Xclust-has[:,;.\s]+([\S]+)$/) {
+ $clust_has_file_name = $1;
+ } elsif (/^\s*Xclust-fts[:,;.\s]+([\S]+)$/) {
+ $clust_fts_file_name = $1;
+ } elsif (/^\s*Xclust-cps[:,;.\s]+([\S]+)$/) {
+ $clust_cps_file_name = $1;
+ } elsif (/^\s*Xpe[:,;.\s]+([\S]+)$/) {
+ $pe_file_name = $1;
+ } elsif (/^\s*Xsn[:,;.\s]+([\S]+)$/) {
+ $sn_file_name = $1;
+
+ } elsif (/^\s*XRTS[:,;.\s]+([\S]+)$/) {
+ $rts_file_name = $1;
+ } elsif (/^\s*XHAS[:,;.\s]+([\S]+)$/) {
+ $has_file_name = $1;
+ } elsif (/^\s*XFTS[:,;.\s]+([\S]+)$/) {
+ $fts_file_name = $1;
+ } elsif (/^\s*XLSPS[:,;.\s]+([\S]+)$/) {
+ $lsps_file_name = $1;
+ } elsif (/^\s*XGSPS[:,;.\s]+([\S]+)$/) {
+ $gsps_file_name = $1;
+ } elsif (/^\s*XCPS[:,;.\s]+([\S]+)$/) {
+ $cps_file_name = $1;
+ } elsif (/^\s*XCCPS[:,;.\s]+([\S]+)$/) {
+ $ccps_file_name = $1;
+
+ } elsif (/^\s*\-[:,;.\s]+([\S]+)$/) {
+ $input = $1;
+ } elsif (/^\s*L[:,;\s]+(.*)$/) {
+ $str = $1;
+ %logscale = ('g',"xy",'a',"xy",'Cg',"xy",'Ca',"xy",'Yp',"y",'Ys',"y") , next if $str eq ".";
+ $str =~ s/[\(\)\[\]]//g;
+ %logscale = split(/[,;. ]+/, $str);
+ } elsif (/^\s*i[:,;.\s]+([\S]+)$/) {
+ $gray = $1;
+ } elsif (/^\s*k[:,;.\s]+([\S]+)$/) {
+ $no_of_clusters = $1;
+ } elsif (/^\s*e[:,;.\s]+([\S]+)$/) {
+ $ext_size = $1;
+ } elsif (/^\s*v.*$/) {
+ $verbose = 1;
+ } elsif (/^\s*T.*$/) {
+ $opt_T = 1;
+ }
+ }
+ close(TEMPLATE);
+}
+
+# ----------------------------------------------------------------------------
+
+1;
diff --git a/ghc/utils/parallel/tf.pl b/ghc/utils/parallel/tf.pl
new file mode 100644
index 0000000000..40cff09f2c
--- /dev/null
+++ b/ghc/utils/parallel/tf.pl
@@ -0,0 +1,148 @@
+#!/usr/local/bin/perl
+# ############################################################################
+# Time-stamp: <Fri Aug 25 1995 23:17:43 Stardate: [-31]6189.64 hwloidl>
+# (C) Hans Wolfgang Loidl, November 1994
+#
+# Usage: tf [options] <gr-file>
+#
+# Show the `taskflow' in the .gr file (especially useful for keeping track of
+# migrated tasks). It's also possible to focus on a given PE or on a given
+# event.
+#
+# Options:
+# -p <int> ... Print all events on PE <int>
+# -t <int> ... Print all events that occur on task <int>
+# -e <str> ... Print all <str> events
+# -n <hex> ... Print all events about fetching the node at address <hex>.
+# -s <int> ... Print all events with a spark name <int>
+# -L ... Print all events with spark queue length information
+# -H ... Print header of the <gr-file>, too
+# -h ... print help message (this text)
+# -v ... be talkative
+#
+# ############################################################################
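+# Example (hypothetical file name; END is the event name tested below):
+#   tf -p 3 -e END parfib.gr
+# prints all END events that happened on PE 3.
+#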
+
+# ----------------------------------------------------------------------------
+# Command line processing and initialization
+# ----------------------------------------------------------------------------
+
+require "getopts.pl";
+
+&Getopts('hvHLp:t:e:n:s:S:');
+
+do process_options();
+
+if ( $opt_v ) {
+ do print_verbose_message();
+}
+
+# ----------------------------------------------------------------------------
+
+$in_header = 1;
+while (<>) {
+ if ( $opt_H && $in_header ) {
+ print;
+ $in_header = 0 if /^\+\+\+\+\+/;
+ }
+ next unless /^PE/;
+ @c = split(/[\s\[\]:;,]+/);
+ if ( ( $check_proc ? $proc eq $c[1] : 1 ) &&
+ ( $check_event ? $event eq $c[3] : 1 ) &&
+ ( $check_task ? $task eq $c[4] : 1) &&
+ ( $check_node ? $node eq $c[5] : 1) &&
+ ( $check_spark ? (("END" eq $c[3]) && ($spark eq $c[6])) : 1) &&
+ ( $negated_spark ? (("END" eq $c[3]) && ($spark ne $c[6])) : 1) &&
+ ( $spark_queue_len ? ($c[5] =~ /sparks/) : 1 ) ) {
+ print;
+ }
+}
+
+exit 0;
+
+# ----------------------------------------------------------------------------
+
+sub process_options {
+
+ if ( $opt_p ne "" ) {
+ $check_proc = 1;
+ $proc = $opt_p;
+ }
+
+ if ( $opt_t ne "" ) {
+ $check_task = 1;
+ $task = $opt_t;
+ }
+
+ if ( $opt_e ne "" ) {
+ $check_event = 1;
+ $event = $opt_e;
+ }
+
+ if ( $opt_n ne "" ) {
+ $check_node = 1;
+ $node = $opt_n
+ }
+
+ if ( $opt_s ne "" ) {
+ $check_spark = 1;
+ $spark = $opt_s
+ }
+
+ if ( $opt_S ne "" ) {
+ $negated_spark = 1;
+ $spark = $opt_S
+ }
+
+ if ( $opt_L ) {
+ $spark_queue_len = 1;
+ } else {
+ $spark_queue_len = 0;
+ }
+
+ if ( $opt_h ) {
+ open (ME,$0) || die "$!: $0";
+ while (<ME>) {
+ last if /^$/;
+ print;
+ }
+ close (ME);
+ exit 1;
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+sub print_verbose_message {
+
+ if ( $opt_p ne "" ) {
+ print "Processor: $proc\n";
+ }
+
+ if ( $opt_t ne "" ) {
+ print "Task: $task\n";
+ }
+
+ if ( $opt_e ne "" ) {
+ print "Event: $event\n";
+ }
+
+ if ( $opt_n ne "" ) {
+ print "Node: $node\n";
+ }
+
+ if ( $opt_s ne "" ) {
+ print "Spark: $spark\n";
+ }
+
+ if ( $opt_S ne "" ) {
+ print "Negated Spark: $spark\n";
+ }
+
+ if ( $opt_L ne "" ) {
+ print "Printing spark queue len info.\n";
+ }
+
+}
+
+# ----------------------------------------------------------------------------
+
diff --git a/ghc/utils/pvm/README b/ghc/utils/pvm/README
index a45840500a..5ab58ddec8 100644
--- a/ghc/utils/pvm/README
+++ b/ghc/utils/pvm/README
@@ -2,6 +2,3 @@
comes with PVM 3.3.7.
Less sure about "debugger.emacs"...
-
-Will Partain
-95/07/24