<?xml version="1.0" encoding="iso-8859-1"?>
<sect1 id="runtime-control">
<title>Running a compiled program</title>
<indexterm><primary>runtime control of Haskell programs</primary></indexterm>
<indexterm><primary>running, compiled program</primary></indexterm>
<indexterm><primary>RTS options</primary></indexterm>
<para>To make an executable program, the GHC system compiles your
code and then links it with a non-trivial runtime system (RTS),
which handles storage management, profiling, etc.</para>
<para>If you use the <literal>-rtsopts</literal> flag when linking,
you have some control over the behaviour of the RTS, by giving
special command-line arguments to your program.</para>
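<para>For example, a minimal sketch of linking with
<literal>-rtsopts</literal> and then passing an RTS flag at run time
(the module and program names here are illustrative only):</para>
<screen>
$ ghc --make Main.hs -rtsopts -o myprog
$ ./myprog +RTS -s -RTS
</screen>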
<para>When your Haskell program starts up, its RTS extracts
command-line arguments bracketed between
<option>+RTS</option><indexterm><primary><option>+RTS</option></primary></indexterm>
and
<option>-RTS</option><indexterm><primary><option>-RTS</option></primary></indexterm>
as its own. For example:</para>
<screen>
% ./a.out -f +RTS -p -S -RTS -h foo bar
</screen>
<para>The RTS will snaffle <option>-p</option> <option>-S</option>
for itself, and the remaining arguments <literal>-f -h foo bar</literal>
will be handed to your program if/when it calls
<function>System.getArgs</function>.</para>
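<para>To illustrate, assume a hypothetical program
<literal>printargs</literal>, linked with <literal>-rtsopts</literal>,
that simply prints the result of <function>getArgs</function>:</para>
<screen>
$ ./printargs -f +RTS -s -RTS -h foo bar
["-f","-h","foo","bar"]
</screen>
<para>Here <option>-s</option> is consumed by the RTS (its statistics
summary goes to <constant>stderr</constant> when the program exits), and
only the remaining arguments are visible to the program.</para>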
<para>No <option>-RTS</option> option is required if the
runtime-system options extend to the end of the command line, as in
this example:</para>
<screen>
% hls -ltr /usr/etc +RTS -A5m
</screen>
<para>If you absolutely positively want all the rest of the options
in a command line to go to the program (and not the RTS), use a
<option>--RTS</option><indexterm><primary><option>--RTS</option></primary></indexterm>.</para>
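<para>A sketch of how this might be used: here <option>-s</option> goes
to the RTS, while everything after <option>--RTS</option>, including the
literal <literal>+RTS</literal>, is passed through to the
program:</para>
<screen>
$ ./a.out -f +RTS -s --RTS +RTS -h foo bar
</screen>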
<para>As always, for RTS options that take
<replaceable>size</replaceable>s: If the last character of
<replaceable>size</replaceable> is a K or k, multiply by 1000; if an
M or m, by 1,000,000; if a G or g, by 1,000,000,000. (And any
wraparound in the counters is <emphasis>your</emphasis>
fault!)</para>
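<para>For instance, following the rule above, <option>-K2m</option>
requests a stack limit of roughly 2,000,000 bytes and
<option>-M1g</option> a maximum heap of roughly 1,000,000,000 bytes
(assuming the program was linked with <literal>-rtsopts</literal>):</para>
<screen>
$ ./a.out +RTS -K2m -M1g -RTS
</screen>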
<para>Giving a <literal>+RTS -f</literal>
<indexterm><primary><option>-f</option></primary><secondary>RTS option</secondary></indexterm> option
will print out the RTS options actually available in your program
(which vary, depending on how you compiled).</para>
<para>NOTE: since GHC is itself compiled by GHC, you can change RTS
options in the compiler using the normal
<literal>+RTS ... -RTS</literal>
combination, e.g. to increase the maximum heap
size for a compilation to 128M, you would add
<literal>+RTS -M128m -RTS</literal>
to the command line.</para>
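<para>Concretely, such a compilation might look like this
(<filename>Big.hs</filename> is an arbitrary example module):</para>
<screen>
$ ghc -c Big.hs +RTS -M128m -RTS
</screen>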
<sect2 id="rts-options-environment">
<title>Setting global RTS options</title>
<indexterm><primary>RTS options</primary><secondary>from the environment</secondary></indexterm>
<indexterm><primary>environment variable</primary><secondary>for
setting RTS options</secondary></indexterm>
<para>When the <literal>-rtsopts</literal> flag is used when linking,
RTS options are also taken from the environment variable
<envar>GHCRTS</envar><indexterm><primary><envar>GHCRTS</envar></primary>
</indexterm>. For example, to set the maximum heap size
to 128M for all GHC-compiled programs (using an
<literal>sh</literal>-like shell):</para>
<screen>
GHCRTS='-M128m'
export GHCRTS
</screen>
<para>RTS options taken from the <envar>GHCRTS</envar> environment
variable can be overridden by options given on the command
line.</para>
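<para>For example, with an <literal>sh</literal>-like shell and a
program linked with <literal>-rtsopts</literal>, a flag given on the
command line takes precedence over the one from
<envar>GHCRTS</envar>:</para>
<screen>
$ GHCRTS='-M128m' ./a.out +RTS -M256m -RTS
</screen>
<para>Here the program runs with a 256M maximum heap.</para>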
</sect2>
<sect2 id="rts-options-misc">
<title>Miscellaneous RTS options</title>
<variablelist>
<varlistentry>
<term><option>-V<replaceable>secs</replaceable></option>
<indexterm><primary><option>-V</option></primary><secondary>RTS
option</secondary></indexterm></term>
<listitem>
<para>Sets the interval that the RTS clock ticks at. The
runtime uses a single timer signal to count ticks; this timer
signal is used to control the context switch timer (<xref
linkend="using-concurrent" />) and the heap profiling
timer (<xref linkend="rts-options-heap-prof" />). Also, the
time profiler uses the RTS timer signal directly to record
time profiling samples.</para>
<para>Normally, setting the <option>-V</option> option
directly is not necessary: the resolution of the RTS timer is
adjusted automatically if a short interval is requested with
the <option>-C</option> or <option>-i</option> options.
However, setting <option>-V</option> is required in order to
increase the resolution of the time profiler.</para>
<para>Using a value of zero disables the RTS clock
completely, and has the effect of disabling timers that
depend on it: the context switch timer and the heap profiling
timer. Context switches will still happen, but
deterministically and at a rate much faster than normal.
Disabling the interval timer is useful for debugging, because
it eliminates a source of non-determinism at runtime.</para>
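<para>For example, to run a program with the RTS clock disabled, which
can make debugging runs more deterministic (assuming it was linked with
<literal>-rtsopts</literal>):</para>
<screen>
$ ./a.out +RTS -V0 -RTS
</screen>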
</listitem>
</varlistentry>
<varlistentry>
<term><option>--install-signal-handlers=<replaceable>yes|no</replaceable></option>
<indexterm><primary><option>--install-signal-handlers</option></primary><secondary>RTS
option</secondary></indexterm></term>
<listitem>
<para>If yes (the default), the RTS installs signal handlers to catch
things like ctrl-C. This option is primarily useful for when
you are using the Haskell code as a DLL, and want to set your
own signal handlers.</para>
<para>Note that even
with <option>--install-signal-handlers=no</option>, the RTS
interval timer signal is still enabled. The timer signal
is either SIGVTALRM or SIGALRM, depending on the RTS
configuration and OS capabilities. To disable the timer
signal, use the <literal>-V0</literal> RTS option (see
above).
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>-xm<replaceable>address</replaceable></option>
<indexterm><primary><option>-xm</option></primary><secondary>RTS
option</secondary></indexterm></term>
<listitem>
<para>
WARNING: this option is for working around memory
allocation problems only. Do not use unless GHCi fails
with a message like “<literal>failed to mmap() memory below 2Gb</literal>”. If you need to use this option to get GHCi working
on your machine, please file a bug.
</para>
<para>
On 64-bit machines, the RTS needs to allocate memory in the
low 2Gb of the address space. Support for this across
different operating systems is patchy, and sometimes fails.
This option is there to give the RTS a hint about where it
should be able to allocate memory in the low 2Gb of the
address space. For example, <literal>+RTS -xm20000000
-RTS</literal> would hint that the RTS should allocate
starting at the 0.5Gb mark. The default is to use the OS's
built-in support for allocating memory in the low 2Gb if
available (e.g. <literal>mmap</literal>
with <literal>MAP_32BIT</literal> on Linux), or
otherwise <literal>-xm40000000</literal>.
</para>
</listitem>
</varlistentry>
</variablelist>
</sect2>
<sect2 id="rts-options-gc">
<title>RTS options to control the garbage collector</title>
<indexterm><primary>garbage collector</primary><secondary>options</secondary></indexterm>
<indexterm><primary>RTS options</primary><secondary>garbage collection</secondary></indexterm>
<para>There are several options to give you precise control over
garbage collection. Hopefully, you won't need any of these in
normal operation, but there are several things that can be tweaked
for maximum performance.</para>
<variablelist>
<varlistentry>
<term>
<option>-A</option><replaceable>size</replaceable>
<indexterm><primary><option>-A</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>allocation area, size</primary></indexterm>
</term>
<listitem>
<para>[Default: 512k] Set the allocation area size
used by the garbage collector. The allocation area
(actually generation 0 step 0) is fixed and is never resized
(unless you use <option>-H</option>, below).</para>
<para>Increasing the allocation area size may or may not
give better performance (a bigger allocation area means
worse cache behaviour but fewer garbage collections and less
promotion).</para>
<para>With only 1 generation (<option>-G1</option>) the
<option>-A</option> option specifies the minimum allocation
area, since the actual size of the allocation area will be
resized according to the amount of data in the heap (see
<option>-F</option>, below).</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-c</option>
<indexterm><primary><option>-c</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>garbage collection</primary><secondary>compacting</secondary></indexterm>
<indexterm><primary>compacting garbage collection</primary></indexterm>
</term>
<listitem>
<para>Use a compacting algorithm for collecting the oldest
generation. By default, the oldest generation is collected
using a copying algorithm; this option causes it to be
compacted in-place instead. The compaction algorithm is
slower than the copying algorithm, but the savings in memory
use can be considerable.</para>
<para>For a given heap size (using the <option>-H</option>
option), compaction can in fact reduce the GC cost by
allowing fewer GCs to be performed. This is more likely
when the ratio of live data to heap size is high, say
>30%.</para>
<para>NOTE: compaction doesn't currently work when a single
generation is requested using the <option>-G1</option>
option.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>-c</option><replaceable>n</replaceable></term>
<listitem>
<para>[Default: 30] Automatically enable
compacting collection when the live data exceeds
<replaceable>n</replaceable>% of the maximum heap size
(see the <option>-M</option> option). Note that the maximum
heap size is unlimited by default, so this option has no
effect unless the maximum heap size is set with
<option>-M</option><replaceable>size</replaceable>. </para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-F</option><replaceable>factor</replaceable>
<indexterm><primary><option>-F</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>heap size, factor</primary></indexterm>
</term>
<listitem>
<para>[Default: 2] This option controls the amount
of memory reserved for the older generations (and in the
case of a two space collector the size of the allocation
area) as a factor of the amount of live data. For example,
if there was 2M of live data in the oldest generation when
we last collected it, then by default we'll wait until it
grows to 4M before collecting it again.</para>
<para>The default seems to work well here. If you have
plenty of memory, it is usually better to use
<option>-H</option><replaceable>size</replaceable> than to
increase
<option>-F</option><replaceable>factor</replaceable>.</para>
<para>The <option>-F</option> setting will be automatically
reduced by the garbage collector when the maximum heap size
(the <option>-M</option><replaceable>size</replaceable>
setting) is approaching.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-G</option><replaceable>generations</replaceable>
<indexterm><primary><option>-G</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>generations, number of</primary></indexterm>
</term>
<listitem>
<para>[Default: 2] Set the number of generations
used by the garbage collector. The default of 2 seems to be
good, but the garbage collector can support any number of
generations. Anything larger than about 4 is probably not a
good idea unless your program runs for a
<emphasis>long</emphasis> time, because the oldest
generation will hardly ever get collected.</para>
<para>Specifying 1 generation with <option>+RTS -G1</option>
gives you a simple 2-space collector, as you would expect.
In a 2-space collector, the <option>-A</option> option (see
above) specifies the <emphasis>minimum</emphasis> allocation
area size, since the allocation area will grow with the
amount of live data in the heap. In a multi-generational
collector the allocation area is a fixed size (unless you
use the <option>-H</option> option, see below).</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-qg<optional><replaceable>gen</replaceable></optional></option>
<indexterm><primary><option>-qg</option></primary><secondary>RTS
option</secondary></indexterm>
</term>
<listitem>
<para>[New in GHC 6.12.1] [Default: 0]
Use parallel GC in
generation <replaceable>gen</replaceable> and higher.
Omitting <replaceable>gen</replaceable> turns off the
parallel GC completely, reverting to sequential GC.</para>
<para>The default parallel GC settings are usually suitable
for parallel programs (i.e. those
using <literal>par</literal>, Strategies, or with multiple
threads). However, it is sometimes beneficial to enable
the parallel GC for a single-threaded sequential program
too, especially if the program has a large amount of heap
data and GC is a significant fraction of runtime. To use
the parallel GC in a sequential program, enable the
parallel runtime with a suitable <literal>-N</literal>
option, and additionally it might be beneficial to
restrict parallel GC to the old generation
with <literal>-qg1</literal>.</para>
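<para>For instance, a sketch of running a sequential program with two
cores available to the GC, restricting parallel GC to the old
generation (assuming the program was linked with
<option>-threaded</option> and <literal>-rtsopts</literal>):</para>
<screen>
$ ./a.out +RTS -N2 -qg1 -RTS
</screen>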
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-qb<optional><replaceable>gen</replaceable></optional></option>
<indexterm><primary><option>-qb</option></primary><secondary>RTS
option</secondary></indexterm>
</term>
<listitem>
<para>
[New in GHC 6.12.1] [Default: 1] Use
load-balancing in the parallel GC in
generation <replaceable>gen</replaceable> and higher.
Omitting <replaceable>gen</replaceable> disables
load-balancing entirely.</para>
<para>
Load-balancing shares out the work of GC between the
available cores. This is a good idea when the heap is
large and we need to parallelise the GC work; however, it
is also pessimal for the short young-generation
collections in a parallel program, because it can harm
locality by moving data from the cache of the CPU where it
is being used to the cache of another CPU. Hence the
default is to do load-balancing only in the old
generation. In fact, for a parallel program it is
sometimes beneficial to disable load-balancing entirely
with <literal>-qb</literal>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-H</option><replaceable>size</replaceable>
<indexterm><primary><option>-H</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>heap size, suggested</primary></indexterm>
</term>
<listitem>
<para>[Default: 0] This option provides a
“suggested heap size” for the garbage collector. The
garbage collector will use about this much memory until the
program residency grows and the heap size needs to be
expanded to retain reasonable performance.</para>
<para>By default, the heap will start small, and grow and
shrink as necessary. This can be bad for performance, so if
you have plenty of memory it's worthwhile supplying a big
<option>-H</option><replaceable>size</replaceable>. For
improving GC performance, using
<option>-H</option><replaceable>size</replaceable> is
usually a better bet than
<option>-A</option><replaceable>size</replaceable>.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-I</option><replaceable>seconds</replaceable>
<indexterm><primary><option>-I</option></primary>
<secondary>RTS option</secondary>
</indexterm>
<indexterm><primary>idle GC</primary>
</indexterm>
</term>
<listitem>
<para>[Default: 0.3] In the threaded and SMP versions of the RTS (see
<option>-threaded</option>, <xref linkend="options-linker" />), a
major GC is automatically performed if the runtime has been idle
(no Haskell computation has been running) for a period of time.
The amount of idle time which must pass before a GC is performed is
set by the <option>-I</option><replaceable>seconds</replaceable>
option. Specifying <option>-I0</option> disables the idle GC.</para>
<para>For an interactive application, it is probably a good idea to
use the idle GC, because this will allow finalizers to run and
deadlocked threads to be detected in the idle time when no Haskell
computation is happening. Also, it will mean that a GC is less
likely to happen when the application is busy, and so
responsiveness may be improved. However, if the amount of live data in
the heap is particularly large, then the idle GC can cause a
significant delay, and too small an interval could adversely affect
interactive responsiveness.</para>
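<para>For example, a batch program with a large heap might disable the
idle GC entirely, while an interactive one might prefer a longer
interval (the program names are illustrative; both are assumed to be
linked with <literal>-rtsopts</literal>):</para>
<screen>
$ ./batchprog +RTS -I0 -RTS
$ ./guiprog +RTS -I5 -RTS
</screen>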
<para>This is an experimental feature, please let us know if it
causes problems and/or could benefit from further tuning.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-ki</option><replaceable>size</replaceable>
<indexterm><primary><option>-k</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>stack, initial size</primary></indexterm>
</term>
<listitem>
<para>
[Default: 1k] Set the initial stack size for new
threads. (Note: this flag used to be
simply <option>-k</option>, but was renamed
to <option>-ki</option> in GHC 7.2.1. The old name is
still accepted for backwards compatibility, but it may
be removed in a future version.)
</para>
<para>
Thread stacks (including the main thread's stack) live on
the heap. As the stack grows, new stack chunks are added
as required; if the stack shrinks again, these extra stack
chunks are reclaimed by the garbage collector. The
default initial stack size is deliberately small, in order
to keep the time and space overhead for thread creation to
a minimum, and to make it practical to spawn threads for
even tiny pieces of work.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-kc</option><replaceable>size</replaceable>
<indexterm><primary><option>-kc</option></primary><secondary>RTS
option</secondary></indexterm>
<indexterm><primary>stack</primary><secondary>chunk size</secondary></indexterm>
</term>
<listitem>
<para>
[Default: 32k] Set the size of “stack
chunks”. When a thread's current stack overflows, a
new stack chunk is created and added to the thread's
stack, until the limit set by <option>-K</option> is
reached.
</para>
<para>
The advantage of smaller stack chunks is that the garbage
collector can avoid traversing stack chunks if they are
known to be unmodified since the last collection, so
reducing the chunk size means that the garbage collector
can identify more stack as unmodified, and the GC overhead
might be reduced. On the other hand, making stack chunks
too small adds some overhead as there will be more
overflow/underflow between chunks. The default setting of
32k appears to be a reasonable compromise in most cases.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-kb</option><replaceable>size</replaceable>
<indexterm><primary><option>-kb</option></primary><secondary>RTS
option</secondary></indexterm>
<indexterm><primary>stack</primary><secondary>chunk buffer size</secondary></indexterm>
</term>
<listitem>
<para>
[Default: 1k] Sets the stack chunk buffer size.
When a stack chunk overflows and a new stack chunk is
created, some of the data from the previous stack chunk is
moved into the new chunk, to avoid an immediate underflow
and repeated overflow/underflow at the boundary. The
amount of stack moved is set by the <option>-kb</option>
option.
</para>
<para>
Note that to avoid wasting space, this value should
typically be less than 10% of the size of a stack
chunk (<option>-kc</option>), because in a chain of stack
chunks, each chunk will have a gap of unused space of this
size.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-K</option><replaceable>size</replaceable>
<indexterm><primary><option>-K</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>stack, maximum size</primary></indexterm>
</term>
<listitem>
<para>[Default: 8M] Set the maximum stack size for
an individual thread to <replaceable>size</replaceable>
bytes. If the thread attempts to exceed this limit, it will
be sent the <literal>StackOverflow</literal> exception.
</para>
<para>
This option is there mainly to stop the program eating up
all the available memory in the machine if it gets into an
infinite loop.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-m</option><replaceable>n</replaceable>
<indexterm><primary><option>-m</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>heap, minimum free</primary></indexterm>
</term>
<listitem>
<para>Set the minimum percentage <replaceable>n</replaceable> of the
heap which must be available for allocation. The default is
3%.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-M</option><replaceable>size</replaceable>
<indexterm><primary><option>-M</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>heap size, maximum</primary></indexterm>
</term>
<listitem>
<para>[Default: unlimited] Set the maximum heap size to
<replaceable>size</replaceable> bytes. The heap normally
grows and shrinks according to the memory requirements of
the program. The only reason for having this option is to
stop the heap growing without bound and filling up all the
available swap space, which at the least will result in the
program being summarily killed by the operating
system.</para>
<para>The maximum heap size also affects other garbage
collection parameters: when the amount of live data in the
heap exceeds a certain fraction of the maximum heap size,
compacting collection will be automatically enabled for the
oldest generation, and the <option>-F</option> parameter
will be reduced in order to avoid exceeding the maximum heap
size.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-t</option><optional><replaceable>file</replaceable></optional>
<indexterm><primary><option>-t</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<term>
<option>-s</option><optional><replaceable>file</replaceable></optional>
<indexterm><primary><option>-s</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<term>
<option>-S</option><optional><replaceable>file</replaceable></optional>
<indexterm><primary><option>-S</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<term>
<option>--machine-readable</option>
<indexterm><primary><option>--machine-readable</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<listitem>
<para>These options produce runtime-system statistics, such
as the amount of time spent executing the program and in the
garbage collector, the amount of memory allocated, the
maximum size of the heap, and so on. The three
variants give different levels of detail:
<option>-t</option> produces a single line of output in the
same format as GHC's <option>-Rghc-timing</option> option,
<option>-s</option> produces a more detailed summary at the
end of the program, and <option>-S</option> additionally
produces information about each and every garbage
collection.</para>
<para>The output is placed in
<replaceable>file</replaceable>. If
<replaceable>file</replaceable> is omitted, then the output
is sent to <constant>stderr</constant>.</para>
<para>
If you use the <literal>-t</literal> flag then, when your
program finishes, you will see something like this:
</para>
<programlisting>
&lt;&lt;ghc: 36169392 bytes, 69 GCs, 603392/1065272 avg/max bytes residency (2 samples), 3M in use, 0.00 INIT (0.00 elapsed), 0.02 MUT (0.02 elapsed), 0.07 GC (0.07 elapsed) :ghc&gt;&gt;
</programlisting>
<para>
This tells you:
</para>
<itemizedlist>
<listitem>
<para>
The total number of bytes allocated by the program over the
whole run.
</para>
</listitem>
<listitem>
<para>
The total number of garbage collections performed.
</para>
</listitem>
<listitem>
<para>
The average and maximum "residency", which is the amount of
live data in bytes. The runtime can only determine the
amount of live data during a major GC, which is why the
number of samples corresponds to the number of major GCs
(and is usually relatively small). To get a better picture
of the heap profile of your program, use
the <option>-hT</option> RTS option
(<xref linkend="rts-profiling" />).
</para>
</listitem>
<listitem>
<para>
The peak memory the RTS has allocated from the OS.
</para>
</listitem>
<listitem>
<para>
The amount of CPU time and elapsed wall clock time while
initialising the runtime system (INIT), running the program
itself (MUT, the mutator), and garbage collecting (GC).
</para>
</listitem>
</itemizedlist>
<para>
You can also get this in a more future-proof, machine readable
format, with <literal>-t --machine-readable</literal>:
</para>
<programlisting>
[("bytes allocated", "36169392")
,("num_GCs", "69")
,("average_bytes_used", "603392")
,("max_bytes_used", "1065272")
,("num_byte_usage_samples", "2")
,("peak_megabytes_allocated", "3")
,("init_cpu_seconds", "0.00")
,("init_wall_seconds", "0.00")
,("mutator_cpu_seconds", "0.02")
,("mutator_wall_seconds", "0.02")
,("GC_cpu_seconds", "0.07")
,("GC_wall_seconds", "0.07")
]
</programlisting>
<para>
If you use the <literal>-s</literal> flag then, when your
program finishes, you will see something like this (the exact
details will vary depending on what sort of RTS you have, e.g.
you will only see profiling data if your RTS is compiled for
profiling):
</para>
<programlisting>
36,169,392 bytes allocated in the heap
4,057,632 bytes copied during GC
1,065,272 bytes maximum residency (2 sample(s))
54,312 bytes maximum slop
3 MB total memory in use (0 MB lost due to fragmentation)
Generation 0: 67 collections, 0 parallel, 0.04s, 0.03s elapsed
Generation 1: 2 collections, 0 parallel, 0.03s, 0.04s elapsed
SPARKS: 359207 (557 converted, 149591 pruned)
INIT time 0.00s ( 0.00s elapsed)
MUT time 0.01s ( 0.02s elapsed)
GC time 0.07s ( 0.07s elapsed)
EXIT time 0.00s ( 0.00s elapsed)
Total time 0.08s ( 0.09s elapsed)
%GC time 89.5% (75.3% elapsed)
Alloc rate 4,520,608,923 bytes per MUT second
Productivity 10.5% of total user, 9.1% of total elapsed
</programlisting>
<itemizedlist>
<listitem>
<para>
The "bytes allocated in the heap" is the total bytes allocated
by the program over the whole run.
</para>
</listitem>
<listitem>
<para>
GHC uses a copying garbage collector by default. "bytes copied
during GC" tells you how many bytes it had to copy during
garbage collection.
</para>
</listitem>
<listitem>
<para>
The maximum space actually used by your program is the
"bytes maximum residency" figure. This is only checked during
major garbage collections, so it is only an approximation;
the number of samples tells you how many times it is checked.
</para>
</listitem>
<listitem>
<para>
The "bytes maximum slop" tells you the most space that is ever
wasted due to the way GHC allocates memory in blocks. Slop is
memory at the end of a block that was wasted. There's no way
to control this; we just like to see how much memory is being
lost this way.
</para>
</listitem>
<listitem>
<para>
The "total memory in use" tells you the peak memory the RTS has
allocated from the OS.
</para>
</listitem>
<listitem>
<para>
Next there is information about the garbage collections done.
For each generation it says how many garbage collections were
done, how many of those collections were done in parallel,
the total CPU time used for garbage collecting that generation,
and the total wall clock time elapsed while garbage collecting
that generation.
</para>
</listitem>
<listitem>
<para>The <literal>SPARKS</literal> statistic refers to the
use of <literal>Control.Parallel.par</literal> and related
functionality in the program. Each spark represents a call
to <literal>par</literal>; a spark is "converted" when it is
executed in parallel; and a spark is "pruned" when it is
found to be already evaluated and is discarded from the pool
by the garbage collector. Any remaining sparks are
discarded at the end of execution, so "converted" plus
"pruned" does not necessarily add up to the total.</para>
</listitem>
<listitem>
<para>
Next there is the CPU time and wall clock time elapsed broken
down by what the runtime system was doing at the time.
INIT is the runtime system initialisation.
MUT is the mutator time, i.e. the time spent actually running
your code.
GC is the time spent doing garbage collection.
RP is the time spent doing retainer profiling.
PROF is the time spent doing other profiling.
EXIT is the runtime system shutdown time.
And finally, Total is, of course, the total.
</para>
<para>
%GC time tells you what percentage GC is of Total.
"Alloc rate" tells you the "bytes allocated in the heap" divided
by the MUT CPU time.
"Productivity" tells you what percentage of the Total CPU and wall
clock elapsed times are spent in the mutator (MUT).
</para>
</listitem>
</itemizedlist>
<para>
The <literal>-S</literal> flag, as well as giving the same
output as the <literal>-s</literal> flag, prints information
about each GC as it happens:
</para>
<programlisting>
Alloc Copied Live GC GC TOT TOT Page Flts
bytes bytes bytes user elap user elap
528496 47728 141512 0.01 0.02 0.02 0.02 0 0 (Gen: 1)
[...]
524944 175944 1726384 0.00 0.00 0.08 0.11 0 0 (Gen: 0)
</programlisting>
<para>
For each garbage collection, we print:
</para>
<itemizedlist>
<listitem>
<para>
How many bytes we allocated this garbage collection.
</para>
</listitem>
<listitem>
<para>
How many bytes we copied this garbage collection.
</para>
</listitem>
<listitem>
<para>
How many bytes are currently live.
</para>
</listitem>
<listitem>
<para>
How long this garbage collection took (CPU time and elapsed
wall clock time).
</para>
</listitem>
<listitem>
<para>
How long the program has been running (CPU time and elapsed
wall clock time).
</para>
</listitem>
<listitem>
<para>
How many page faults occurred this garbage collection.
</para>
</listitem>
<listitem>
<para>
How many page faults occurred since the end of the last garbage
collection.
</para>
</listitem>
<listitem>
<para>
Which generation is being garbage collected.
</para>
</listitem>
</itemizedlist>
</listitem>
</varlistentry>
</variablelist>
</sect2>
<sect2>
<title>RTS options for concurrency and parallelism</title>
<para>The RTS options related to concurrency are described in
<xref linkend="using-concurrent" />, and those for parallelism in
<xref linkend="parallel-options"/>.</para>
</sect2>
<sect2 id="rts-profiling">
<title>RTS options for profiling</title>
<para>Most profiling runtime options are only available when you
compile your program for profiling (see
<xref linkend="prof-compiler-options" />, and
<xref linkend="rts-options-heap-prof" /> for the runtime options).
However, there is one profiling option that is available
for ordinary non-profiled executables:</para>
<variablelist>
<varlistentry>
<term>
<option>-hT</option>
<indexterm><primary><option>-hT</option></primary><secondary>RTS
option</secondary></indexterm>
</term>
<listitem>
<para>Generates a basic heap profile, in the
file <literal><replaceable>prog</replaceable>.hp</literal>.
To produce the heap profile graph,
use <command>hp2ps</command> (see <xref linkend="hp2ps"
/>). The basic heap profile is broken down by data
constructor, with other types of closures (functions, thunks,
etc.) grouped into broad categories
(e.g. <literal>FUN</literal>, <literal>THUNK</literal>). To
get a more detailed profile, use the full profiling
support (<xref linkend="profiling" />).</para>
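<para>A typical session might look like the following, where
<literal>prog</literal> stands for your program (assumed to be linked
with <literal>-rtsopts</literal>):</para>
<screen>
$ ./prog +RTS -hT -RTS
$ hp2ps -c prog.hp
</screen>
<para>This produces <literal>prog.ps</literal>, a graph of the heap
profile broken down by closure type.</para>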
</listitem>
</varlistentry>
</variablelist>
</sect2>
<sect2 id="rts-eventlog">
<title>Tracing</title>
<indexterm><primary>tracing</primary></indexterm>
<indexterm><primary>events</primary></indexterm>
<indexterm><primary>eventlog files</primary></indexterm>
<para>
When the program is linked with the <option>-eventlog</option>
option (<xref linkend="options-linker" />), runtime events can
be logged in two ways:
</para>
<itemizedlist>
<listitem>
<para>
In binary format to a file for later analysis by a
variety of tools. One such tool
is <ulink url="http://hackage.haskell.org/package/ThreadScope">ThreadScope</ulink><indexterm><primary>ThreadScope</primary></indexterm>,
which interprets the event log to produce a visual parallel
execution profile of the program.
</para>
</listitem>
<listitem>
<para>
As text to standard output, for debugging purposes.
</para>
</listitem>
</itemizedlist>
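<para>A sketch of producing and then viewing an eventlog with
ThreadScope (the module and program names are illustrative, and
ThreadScope must be installed separately):</para>
<screen>
$ ghc -O2 -threaded -eventlog -rtsopts --make Main.hs -o myprog
$ ./myprog +RTS -N2 -ls -RTS
$ threadscope myprog.eventlog
</screen>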
<variablelist>
<varlistentry>
<term>
<option>-l<optional><replaceable>flags</replaceable></optional></option>
<indexterm><primary><option>-l</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<listitem>
<para>
Log events in binary format to the
file <filename><replaceable>program</replaceable>.eventlog</filename>,
where <replaceable>flags</replaceable> is a sequence of
zero or more characters indicating which kinds of events
to log. Currently there is only one type
supported: <literal>-ls</literal>, for scheduler events.
</para>
<para>
The format of the log file is described by the header
<filename>EventLogFormat.h</filename> that comes with
GHC, and it can be parsed in Haskell using
the <ulink url="http://hackage.haskell.org/package/ghc-events">ghc-events</ulink>
library. To dump the contents of
a <literal>.eventlog</literal> file as text, use the
tool <literal>show-ghc-events</literal> that comes with
the <ulink url="http://hackage.haskell.org/package/ghc-events">ghc-events</ulink>
package.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-v</option><optional><replaceable>flags</replaceable></optional>
<indexterm><primary><option>-v</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<listitem>
<para>
Log events as text to standard output, instead of to
the <literal>.eventlog</literal> file.
The <replaceable>flags</replaceable> are the same as
for <option>-l</option>, with the additional
option <literal>t</literal>, which indicates that
each event printed should be preceded by a timestamp value
(in the binary <literal>.eventlog</literal> file, all
events are automatically associated with a timestamp).
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
The debugging
options <option>-D<replaceable>x</replaceable></option> also
generate events which are logged using the tracing framework.
By default those events are dumped as text to stdout
(<option>-D<replaceable>x</replaceable></option>
implies <option>-v</option>), but they may instead be stored in
the binary eventlog file by using the <option>-l</option>
option.
</para>
</sect2>
<sect2 id="rts-options-debugging">
<title>RTS options for hackers, debuggers, and over-interested
souls</title>
<indexterm><primary>RTS options, hacking/debugging</primary></indexterm>
<para>These RTS options might be used (a) to avoid a GHC bug,
(b) to see “what's really happening”, or
(c) because you feel like it. Not recommended for everyday
use!</para>
<variablelist>
<varlistentry>
<term>
<option>-B</option>
<indexterm><primary><option>-B</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<listitem>
<para>Sound the bell at the start of each (major) garbage
collection.</para>
<para>Oddly enough, people really do use this option! Our
pal in Durham (England), Paul Callaghan, writes: “Some
people here use it for a variety of
purposes—honestly!—e.g., confirmation that the
code/machine is doing something, infinite loop detection,
gauging cost of recently added code. Certain people can even
tell what stage [the program] is in by the beep
pattern. But the major use is for annoying others in the
same office…”</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-D</option><replaceable>x</replaceable>
<indexterm><primary>-D</primary><secondary>RTS option</secondary></indexterm>
</term>
<listitem>
<para>
An RTS debugging flag; only available if the program was
linked with the <option>-debug</option> option. Various
values of <replaceable>x</replaceable> are provided to
enable debug messages and additional runtime sanity checks
in different subsystems in the RTS, for
example <literal>+RTS -Ds -RTS</literal> enables debug
messages from the scheduler.
Use <literal>+RTS -?</literal> to find out which
debug flags are supported.
</para>
<para>
Debug messages will be sent to the binary event log file
instead of stdout if the <option>-l</option> option is
added. This might be useful for reducing the overhead of
debug tracing.
</para>
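<para>For example, to see scheduler debug output from a program
(<filename>Main.hs</filename> and the program name are
illustrative):</para>
<screen>
$ ghc -debug -rtsopts --make Main.hs -o myprog
$ ./myprog +RTS -Ds -RTS
</screen>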
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-r</option><replaceable>file</replaceable>
<indexterm><primary><option>-r</option></primary><secondary>RTS option</secondary></indexterm>
<indexterm><primary>ticky ticky profiling</primary></indexterm>
<indexterm><primary>profiling</primary><secondary>ticky ticky</secondary></indexterm>
</term>
<listitem>
<para>Produce “ticky-ticky” statistics at the
end of the program run (only available if the program was
linked with <option>-debug</option>).
The <replaceable>file</replaceable> business works just like
on the <option>-S</option> RTS option, above.</para>
<para>For more information on ticky-ticky profiling, see
<xref linkend="ticky-ticky"/>.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-xc</option>
<indexterm><primary><option>-xc</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<listitem>
<para>(Only available when the program is compiled for
profiling.) When an exception is raised in the program,
this option causes the current cost-centre-stack to be
dumped to <literal>stderr</literal>.</para>
<para>This can be particularly useful for debugging: if your
program is complaining about a <literal>head []</literal>
error and you haven't got a clue which bit of code is
causing it, compiling with <literal>-prof
-auto-all</literal> and running with <literal>+RTS -xc
-RTS</literal> will tell you exactly the call stack at the
point the error was raised.</para>
<para>The output contains one line for each exception raised
in the program (the program might raise and catch several
exceptions during its execution), where each line is of the
form:</para>
<screen>
&lt; cc<subscript>1</subscript>, ..., cc<subscript>n</subscript> &gt;
</screen>
<para>each <literal>cc</literal><subscript>i</subscript> is
a cost centre in the program (see <xref
linkend="cost-centres"/>), and the sequence represents the
“call stack” at the point the exception was
raised. The leftmost item is the innermost function in the
call stack, and the rightmost item is the outermost
function.</para>
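<para>A sketch of the workflow described above (the module and program
names are illustrative):</para>
<screen>
$ ghc -prof -auto-all -rtsopts --make Main.hs -o myprog
$ ./myprog +RTS -xc -RTS
</screen>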
</listitem>
</varlistentry>
<varlistentry>
<term>
<option>-Z</option>
<indexterm><primary><option>-Z</option></primary><secondary>RTS option</secondary></indexterm>
</term>
<listitem>
<para>Turn <emphasis>off</emphasis> “update-frame
squeezing” at garbage-collection time. (There's no
particularly good reason to turn it off, except to ensure
the accuracy of certain data collected regarding thunk entry
counts.)</para>
</listitem>
</varlistentry>
</variablelist>
</sect2>
<sect2>
<title>Linker flags to change RTS behaviour</title>
<indexterm><primary>RTS behaviour, changing</primary></indexterm>
<para>
GHC lets you exercise rudimentary control over the RTS settings
for any given program, by using the <literal>-with-rtsopts</literal>
linker flag. For example, to set <literal>-H128m -K1m</literal>,
link with <literal>-with-rtsopts="-H128m -K1m"</literal>.
</para>
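<para>For example (<filename>Main.hs</filename> is an arbitrary module
name):</para>
<screen>
$ ghc --make Main.hs -o myprog -with-rtsopts="-H128m -K1m"
</screen>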
</sect2>
<sect2 id="rts-hooks">
<title>“Hooks” to change RTS behaviour</title>
<indexterm><primary>hooks</primary><secondary>RTS</secondary></indexterm>
<indexterm><primary>RTS hooks</primary></indexterm>
<indexterm><primary>RTS behaviour, changing</primary></indexterm>
<para>GHC lets you exercise rudimentary control over the RTS
settings for any given program, by compiling in a
“hook” that is called by the run-time system. The RTS
contains stub definitions for all these hooks, but by writing your
own version and linking it on the GHC command line, you can
override the defaults.</para>
<para>Owing to the vagaries of DLL linking, these hooks don't work
under Windows when the program is built dynamically.</para>
<para>The hook <literal>ghc_rts_opts</literal><indexterm><primary><literal>ghc_rts_opts</literal></primary>
</indexterm>lets you set RTS
options permanently for a given program. A common use for this is
to give your program a default heap and/or stack size that is
greater than the default. For example, to set <literal>-H128m
-K1m</literal>, place the following definition in a C source
file:</para>
<programlisting>
char *ghc_rts_opts = "-H128m -K1m";
</programlisting>
<para>Compile the C file, and include the object file on the
command line when you link your Haskell program.</para>
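<para>For example, assuming the definition above is saved in a file
called <filename>rts_opts.c</filename> (the file and program names are
illustrative):</para>
<screen>
$ ghc -c rts_opts.c
$ ghc --make Main.hs rts_opts.o -o myprog
</screen>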
<para>These flags are interpreted first, before any RTS flags from
the <literal>GHCRTS</literal> environment variable and any flags
on the command line.</para>
<para>You can also change the messages printed when the runtime
system “blows up,” e.g., on stack overflow. The hooks
for these are as follows:</para>
<variablelist>
<varlistentry>
<term>
<function>void OutOfHeapHook (unsigned long, unsigned long)</function>
<indexterm><primary><function>OutOfHeapHook</function></primary></indexterm>
</term>
<listitem>
<para>The heap-overflow message.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<function>void StackOverflowHook (long int)</function>
<indexterm><primary><function>StackOverflowHook</function></primary></indexterm>
</term>
<listitem>
<para>The stack-overflow message.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<function>void MallocFailHook (long int)</function>
<indexterm><primary><function>MallocFailHook</function></primary></indexterm>
</term>
<listitem>
<para>The message printed if <function>malloc</function>
fails.</para>
</listitem>
</varlistentry>
</variablelist>
<para>For examples of the use of these hooks, see GHC's own
versions in the file
<filename>ghc/compiler/parser/hschooks.c</filename> in a GHC
source tree.</para>
</sect2>
<sect2>
<title>Getting information about the RTS</title>
<indexterm><primary>RTS</primary></indexterm>
<para>It is possible to ask the RTS to give some information about
itself. To do this, use the <option>--info</option> flag, e.g.</para>
<screen>
$ ./a.out +RTS --info
[("GHC RTS", "YES")
,("GHC version", "6.7")
,("RTS way", "rts_p")
,("Host platform", "x86_64-unknown-linux")
,("Host architecture", "x86_64")
,("Host OS", "linux")
,("Host vendor", "unknown")
,("Build platform", "x86_64-unknown-linux")
,("Build architecture", "x86_64")
,("Build OS", "linux")
,("Build vendor", "unknown")
,("Target platform", "x86_64-unknown-linux")
,("Target architecture", "x86_64")
,("Target OS", "linux")
,("Target vendor", "unknown")
,("Word size", "64")
,("Compiler unregisterised", "NO")
,("Tables next to code", "YES")
]
</screen>
<para>The information is formatted such that it can be read as a
value of type <literal>[(String, String)]</literal>. Currently the following
fields are present:</para>
<variablelist>
<varlistentry>
<term><literal>GHC RTS</literal></term>
<listitem>
<para>Is this program linked against the GHC RTS? (always
"YES").</para>
</listitem>
</varlistentry>
<varlistentry>
<term><literal>GHC version</literal></term>
<listitem>
<para>The version of GHC used to compile this program.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><literal>RTS way</literal></term>
<listitem>
<para>The variant (“way”) of the runtime. The
most common values are <literal>rts</literal> (vanilla),
<literal>rts_thr</literal> (threaded runtime, i.e. linked using the
<literal>-threaded</literal> option) and <literal>rts_p</literal>
(profiling runtime, i.e. linked using the <literal>-prof</literal>
option). Other variants include <literal>debug</literal>
(linked using <literal>-debug</literal>),
<literal>t</literal> (ticky-ticky profiling) and
<literal>dyn</literal> (the RTS is
linked in dynamically, i.e. a shared library, rather than statically
linked into the executable itself). These can be combined,
e.g. you might have <literal>rts_thr_debug_p</literal>.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>Target platform</literal>,
<literal>Target architecture</literal>,
<literal>Target OS</literal>,
<literal>Target vendor</literal>
</term>
<listitem>
<para>These fields describe the platform the program is compiled to run on.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>Build platform</literal>,
<literal>Build architecture</literal>,
<literal>Build OS</literal>,
<literal>Build vendor</literal>
</term>
<listitem>
<para>These fields describe the platform on which the program was
built (that is, the target platform of GHC itself). Ordinarily
this is identical to the target platform, but it could
differ when cross-compiling.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<literal>Host platform</literal>,
<literal>Host architecture</literal>,
<literal>Host OS</literal>,
<literal>Host vendor</literal>
</term>
<listitem>
<para>These fields describe the platform on which GHC itself was
compiled. Again, this is normally identical to the build and
target platforms.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><literal>Word size</literal></term>
<listitem>
<para>Either <literal>"32"</literal> or <literal>"64"</literal>,
reflecting the word size of the target platform.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><literal>Compiler unregisterised</literal></term>
<listitem>
<para>Was this program compiled with an “unregisterised”
version of GHC? (I.e., a version of GHC that has no platform-specific
optimisations compiled in, usually because this is a currently
unsupported platform.) This value will usually be "NO", unless you're
using an experimental build of GHC.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><literal>Tables next to code</literal></term>
<listitem>
<para>Putting info tables directly next to entry code is a useful
performance optimisation that is not available on all platforms.
This field tells you whether the program has been compiled with
this optimisation. (Usually yes, except on unusual platforms.)</para>
</listitem>
</varlistentry>
</variablelist>
</sect2>
</sect1>
<!-- Emacs stuff:
;;; Local Variables: ***
;;; sgml-parent-document: ("users_guide.xml" "book" "chapter" "sect1") ***
;;; End: ***
-->