author    Ben Gamari <ben@smart-cactus.org>          2022-01-30 08:45:49 -0500
committer Marge Bot <ben+marge-bot@smart-cactus.org> 2022-02-01 12:29:26 -0500
commit    88fba8a4b3c22e953a634b81dd0b67ec66eb5e72 (patch)
tree      75a46332ad32cfeaf4f4d52b3b60fd452f2493b6 /rts
parent    06185102bb06d6d56e00d40172a6a473fc228501 (diff)
Fix a few Note inconsistencies
Diffstat (limited to 'rts')
-rw-r--r--  rts/Apply.cmm | 6
-rw-r--r--  rts/Capability.c | 3
-rw-r--r--  rts/Compact.cmm | 2
-rw-r--r--  rts/ForeignExports.c | 1
-rw-r--r--  rts/IPE.c | 1
-rw-r--r--  rts/Interpreter.c | 6
-rw-r--r--  rts/Linker.c | 9
-rw-r--r--  rts/LinkerInternals.h | 1
-rw-r--r--  rts/PrimOps.cmm | 9
-rw-r--r--  rts/ProfHeap.c | 1
-rw-r--r--  rts/RaiseAsync.c | 2
-rw-r--r--  rts/RtsFlags.c | 4
-rw-r--r--  rts/RtsSymbols.c | 3
-rw-r--r--  rts/Schedule.c | 3
-rw-r--r--  rts/StablePtr.c | 2
-rw-r--r--  rts/Stats.c | 9
-rw-r--r--  rts/StgCRun.c | 3
-rw-r--r--  rts/StgMiscClosures.cmm | 8
-rw-r--r--  rts/StgStdThunks.cmm | 4
-rw-r--r--  rts/Task.h | 2
-rw-r--r--  rts/ThreadPaused.c | 2
-rw-r--r--  rts/Threads.c | 2
-rw-r--r--  rts/Timer.c | 3
-rw-r--r--  rts/TraverseHeap.h | 1
-rw-r--r--  rts/Updates.cmm | 2
-rw-r--r--  rts/include/Stg.h | 4
-rw-r--r--  rts/include/rts/Flags.h | 4
-rw-r--r--  rts/include/rts/Libdw.h | 2
-rw-r--r--  rts/include/rts/OSThreads.h | 1
-rw-r--r--  rts/include/rts/prof/CCS.h | 1
-rw-r--r--  rts/include/rts/storage/Block.h | 6
-rw-r--r--  rts/include/rts/storage/ClosureMacros.h | 3
-rw-r--r--  rts/include/rts/storage/Closures.h | 4
-rw-r--r--  rts/include/rts/storage/InfoTables.h | 1
-rw-r--r--  rts/include/rts/storage/TSO.h | 7
-rw-r--r--  rts/include/stg/SMP.h | 1
-rw-r--r--  rts/linker/Elf.c | 4
-rw-r--r--  rts/linker/LoadArchive.c | 1
-rw-r--r--  rts/linker/M32Alloc.c | 4
-rw-r--r--  rts/linker/PEi386.c | 5
-rw-r--r--  rts/linker/PEi386.h | 2
-rw-r--r--  rts/linker/elf_plt_arm.c | 1
-rw-r--r--  rts/linker/elf_reloc_aarch64.c | 1
-rw-r--r--  rts/linker/elf_tlsgd.c | 2
-rw-r--r--  rts/posix/OSMem.c | 2
-rw-r--r--  rts/sm/CNF.c | 1
-rw-r--r--  rts/sm/Evac.c | 6
-rw-r--r--  rts/sm/GC.c | 4
-rw-r--r--  rts/sm/GCUtils.c | 2
-rw-r--r--  rts/sm/NonMoving.c | 6
-rw-r--r--  rts/sm/NonMovingMark.c | 4
-rw-r--r--  rts/sm/NonMovingScav.c | 6
-rw-r--r--  rts/sm/NonMovingSweep.c | 1
-rw-r--r--  rts/sm/Sanity.c | 1
-rw-r--r--  rts/sm/Scav.c | 2
-rw-r--r--  rts/sm/Storage.c | 9
-rw-r--r--  rts/sm/Storage.h | 6
-rw-r--r--  rts/win32/OSMem.c | 1
58 files changed, 85 insertions, 109 deletions
diff --git a/rts/Apply.cmm b/rts/Apply.cmm
index a706c68194..4c3177ae2f 100644
--- a/rts/Apply.cmm
+++ b/rts/Apply.cmm
@@ -38,7 +38,7 @@ stg_ap_0_fast ( P_ fun )
/*
Note [Evaluating functions with profiling]
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If we evaluate something like
let f = {-# SCC "f" #-} g
@@ -461,8 +461,8 @@ for:
-------------------------------------------------------------------------- */
/*
- Note [AP_STACKs must be eagerly blackholed]
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Note [AP_STACKs must be eagerly blackholed]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#13615 describes a nasty concurrency issue where we can enter into the
middle of an ST action multiple times, resulting in duplication of effects.
In short, the construction of an AP_STACK allows us to suspend a computation
diff --git a/rts/Capability.c b/rts/Capability.c
index 374dfe8de7..7ebe51609f 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -660,7 +660,6 @@ enqueueWorker (Capability* cap USED_IF_THREADS)
/*
* Note [Benign data race due to work-pushing]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* #17276 points out a tricky data race (noticed by ThreadSanitizer) between
* waitForWorkerCapability and schedulePushWork. In short, schedulePushWork
* works as follows:
@@ -1039,7 +1038,6 @@ yieldCapability
/*
* Note [migrated bound threads]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* There's a tricky case where:
* - cap A is running an unbound thread T1
* - there is a bound thread T2 at the head of the run queue on cap A
@@ -1060,7 +1058,6 @@ yieldCapability
*
* Note [migrated bound threads 2]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* Second tricky case;
* - A bound Task becomes a GC thread
* - scheduleDoGC() migrates the thread belonging to this Task,
diff --git a/rts/Compact.cmm b/rts/Compact.cmm
index bae94a03cd..8a358e1da5 100644
--- a/rts/Compact.cmm
+++ b/rts/Compact.cmm
@@ -297,7 +297,7 @@ stg_compactAddWithSharingzh (P_ compact, P_ p)
StgCompactNFData_hash(compact) = hash;
// Note [compactAddWorker result]
- //
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// compactAddWorker needs somewhere to store the result - this is
// so that it can be tail-recursive. It must be an address that
// doesn't move during GC, so we can't use heap or stack.
diff --git a/rts/ForeignExports.c b/rts/ForeignExports.c
index e218281b51..e4d7d9a39a 100644
--- a/rts/ForeignExports.c
+++ b/rts/ForeignExports.c
@@ -17,7 +17,6 @@ static ObjectCode *loading_obj = NULL;
/*
* Note [Tracking foreign exports]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* Foreign exports are garbage collection roots. That is, things (e.g. CAFs)
* depended upon by a module's `foreign export`s need to be kept alive for as
* long an module is loaded. To ensure this we create a stable pointer to each
diff --git a/rts/IPE.c b/rts/IPE.c
index 3557b0f33f..5ab8a861fd 100644
--- a/rts/IPE.c
+++ b/rts/IPE.c
@@ -27,7 +27,6 @@
/*
Note [The Info Table Provenance Entry (IPE) Map]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
IPEs are stored in a hash map from info table address (pointer) to IPE. This
ensures cheap lookup and traversal.
diff --git a/rts/Interpreter.c b/rts/Interpreter.c
index bcda08018a..c911d99367 100644
--- a/rts/Interpreter.c
+++ b/rts/Interpreter.c
@@ -103,7 +103,7 @@
#endif
// Note [Not true: ASSERT(Sp > SpLim)]
-//
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// SpLim has some headroom (RESERVED_STACK_WORDS) to allow for saving
// any necessary state on the stack when returning to the scheduler
// when a stack check fails.. The upshot of this is that Sp could be
@@ -117,7 +117,7 @@
return cap;
// Note [avoiding threadPaused]
-//
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Switching between the interpreter to compiled code can happen very
// frequently, so we don't want to call threadPaused(), which is
// expensive. BUT we must be careful not to violate the invariant
@@ -1678,7 +1678,7 @@ run_BCO:
SET_HDR(con, (StgInfoTable*)BCO_LIT(o_itbl), cap->r.rCCCS);
// Note [Data constructor dynamic tags]
- //
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// compute the pointer tag for the constructor and tag the pointer
//
// - 1..(TAG_MASK-1): for first TAG_MASK-1 constructors
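[Editorial aside: the truncated comment above only shows the rule for the first TAG_MASK-1 constructors. A minimal sketch of pointer tagging in that spirit follows; the helper name is hypothetical and the cap at TAG_MASK for larger constructor families is an assumption, not taken from this hunk.]

    #include "Rts.h"

    /* Hypothetical helper, not the interpreter's code: tag a freshly built
       constructor closure with its 1-based constructor number, capping at
       TAG_MASK (assumption) for families with more constructors than tag bits. */
    static StgClosure *tag_constructor_sketch(StgClosure *con, StgWord con_number)
    {
        StgWord tag = con_number < TAG_MASK ? con_number : TAG_MASK;
        return (StgClosure *)((StgWord)con | tag);
    }
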
diff --git a/rts/Linker.c b/rts/Linker.c
index e30fadb262..6c13213092 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -80,7 +80,7 @@
#endif
/*
Note [runtime-linker-support]
- -----------------------------
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When adding support for a new platform to the runtime linker please
update `$TOP/configure.ac` under heading `Does target have runtime
linker support?`.
@@ -94,7 +94,7 @@
addresses of unloaded symbols.
Note [runtime-linker-phases]
- --------------------------------------
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Broadly the behavior of the runtime linker can be
split into the following four phases:
@@ -247,7 +247,7 @@ static void ghciRemoveSymbolTable(StrHashTable *table, const SymbolName* key,
*/
/*
Note [weak-symbols-support]
- -------------------------------------
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
While ghciInsertSymbolTable does implement extensive
logic for weak symbol support, weak symbols are not currently
fully supported by the RTS. This code is mostly here for COMDAT
@@ -610,7 +610,7 @@ internal_dlopen(const char *dll_name)
/*
Note [RTLD_LOCAL]
-
+ ~~~~~~~~~~~~~~~~~
In GHCi we want to be able to override previous .so's with newly
loaded .so's when we recompile something. This further implies that
when we look up a symbol in internal_dlsym() we have to iterate
@@ -1716,6 +1716,7 @@ HsInt loadOc (ObjectCode* oc)
}
/* Note [loadOc orderings]
+ ~~~~~~~~~~~~~~~~~~~~~~~
The order of `ocAllocateExtras` and `ocGetNames` matters. For MachO
and ELF, `ocInit` and `ocGetNames` initialize a bunch of pointers based
on the offset to `oc->image`, but `ocAllocateExtras` may relocate
diff --git a/rts/LinkerInternals.h b/rts/LinkerInternals.h
index 7058ad355b..f3d918e355 100644
--- a/rts/LinkerInternals.h
+++ b/rts/LinkerInternals.h
@@ -118,6 +118,7 @@ typedef enum {
/*
* Note [No typedefs for customizable types]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Some pointer-to-struct types are defined opaquely
* first, and customized later to architecture/ABI-specific
* instantiations. Having the usual
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index c5b6065ec2..84c5850f97 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -166,7 +166,6 @@ stg_isMutableByteArrayPinnedzh ( gcptr mba )
/* Note [LDV profiling and resizing arrays]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* As far as the LDV profiler is concerned arrays are "inherently used" which
* means we don't track their time of use and eventual destruction. We just
* assume they get used.
@@ -1562,7 +1561,7 @@ stg_writeTVarzh (P_ tvar, /* :: TVar a */
* exception and never perform its take or put, and we'd end up with a
* deadlock.
*
- * Note [Nonmoving write barrier in Perform{Take,Put}]
+ * Note [Nonmoving write barrier in Perform{Put,Take}]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* As noted in Note [Non-moving garbage collector] in NonMoving.c, the
* non-moving GC requires that all overwritten pointers be pushed to the update
@@ -1825,7 +1824,7 @@ stg_putMVarzh ( P_ mvar, /* :: MVar a */
StgMVarTSOQueue_tso(q) = CurrentTSO;
SET_HDR(q, stg_MVAR_TSO_QUEUE_info, CCS_SYSTEM);
- //See Note [Heap memory barriers]
+ // See Note [Heap memory barriers]
prim_write_barrier;
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
@@ -2036,7 +2035,7 @@ stg_readMVarzh ( P_ mvar, /* :: MVar a */ )
StgMVarTSOQueue_tso(q) = CurrentTSO;
SET_HDR(q, stg_MVAR_TSO_QUEUE_info, CCS_SYSTEM);
- //See Note [Heap memory barriers]
+ // See Note [Heap memory barriers]
prim_write_barrier;
StgTSO__link(CurrentTSO) = q;
@@ -2169,7 +2168,7 @@ stg_readIOPortzh ( P_ ioport /* :: IOPort a */ )
StgMVarTSOQueue_tso(q) = CurrentTSO;
SET_HDR(q, stg_MVAR_TSO_QUEUE_info, CCS_SYSTEM);
- //See Note [Heap memory barriers]
+ // See Note [Heap memory barriers]
prim_write_barrier;
StgMVar_head(ioport) = q;
diff --git a/rts/ProfHeap.c b/rts/ProfHeap.c
index 82d9059f24..7921041a5a 100644
--- a/rts/ProfHeap.c
+++ b/rts/ProfHeap.c
@@ -1228,7 +1228,6 @@ heapCensusBlock(Census *census, bdescr *bd)
while (p < bd->free && !*p) p++;
/* Note [skipping slop in the heap profiler]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* We make sure to zero slop that can remain after a major GC so
* here we can assume any slop words we see until the block's free
* pointer are zero. Since info pointers are always nonzero we can
diff --git a/rts/RaiseAsync.c b/rts/RaiseAsync.c
index 39f39a22b4..b668b6a178 100644
--- a/rts/RaiseAsync.c
+++ b/rts/RaiseAsync.c
@@ -93,7 +93,7 @@ suspendComputation (Capability *cap, StgTSO *tso, StgUpdateFrame *stop_here)
throwTo().
Note [Throw to self when masked]
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When a StackOverflow occurs when the thread is masked, we want to
defer the exception to when the thread becomes unmasked/hits an
interruptible point. We already have a mechanism for doing this,
diff --git a/rts/RtsFlags.c b/rts/RtsFlags.c
index c200dcde5d..29664831f8 100644
--- a/rts/RtsFlags.c
+++ b/rts/RtsFlags.c
@@ -552,7 +552,7 @@ usage_text[] = {
/**
Note [Windows Unicode Arguments]
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
On Windows argv is usually encoded in the current Codepage which might not
support unicode.
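[Editorial aside: the note above says the ANSI argv may lose information on Windows. A small, illustrative sketch of the usual Win32 workaround, fetching the UTF-16 command line and re-encoding it as UTF-8; this is not claimed to be the RTS's implementation.]

    #include <windows.h>
    #include <shellapi.h>
    #include <stdlib.h>

    /* Illustrative only: obtain the command line as UTF-16 and convert each
       argument to UTF-8 so nothing is lost to the current codepage. */
    static char **utf8_args(int *argc)
    {
        LPWSTR *wargv = CommandLineToArgvW(GetCommandLineW(), argc);
        char **argv = malloc((size_t)*argc * sizeof(char *));
        for (int i = 0; i < *argc; i++) {
            int len = WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1, NULL, 0, NULL, NULL);
            argv[i] = malloc((size_t)len);
            WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1, argv[i], len, NULL, NULL);
        }
        LocalFree(wargv);
        return argv;
    }
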
@@ -2587,7 +2587,7 @@ void freeRtsArgs(void)
/*
Note [OPTION_SAFE vs OPTION_UNSAFE]
-
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Ticket #3910 originally pointed out that the RTS options are a potential
security problem. For example the -t -s or -S flags can be used to
overwrite files. This would be bad in the context of CGI scripts or
diff --git a/rts/RtsSymbols.c b/rts/RtsSymbols.c
index b2c85b591c..e186830b4e 100644
--- a/rts/RtsSymbols.c
+++ b/rts/RtsSymbols.c
@@ -93,7 +93,6 @@ extern char **environ;
/*
* Note [Strong symbols]
* ~~~~~~~~~~~~~~~~~~~~~
- *
* The notion of a *weak* symbol is fairly common in linking: a symbol is weak
* if it is declared but not defined, allowing it to be defined by an object
* which is loaded later. GHC generalizes this notion, allowing symbol
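[Editorial aside: for readers unfamiliar with the standard linking notion this note generalizes, a small self-contained GCC/Clang example of a weak symbol; nothing here is GHC-specific.]

    /* A weak declaration: the symbol may remain undefined, or a later object
       file may supply a strong definition that takes precedence. */
    __attribute__((weak)) extern int optional_hook(void);

    int call_hook(void)
    {
        /* The address of an undefined weak symbol compares equal to NULL. */
        return optional_hook ? optional_hook() : 0;
    }
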
@@ -112,7 +111,6 @@ extern char **environ;
/*
* Note [Symbols for MinGW's printf]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* The printf offered by Microsoft's libc implementation, msvcrt, is quite
* incomplete, lacking support for even %ull. Consequently mingw-w64 offers its
* own implementation which we enable. However, to be thread-safe the
@@ -132,7 +130,6 @@ extern char **environ;
*/
/* Note [_iob_func symbol]
* ~~~~~~~~~~~~~~~~~~~~~~~
- *
* Microsoft in VS2013 to VS2015 transition made a backwards incompatible change
* to the stdio function __iob_func.
*
diff --git a/rts/Schedule.c b/rts/Schedule.c
index b9b15811c9..fa48bef1a7 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -224,6 +224,7 @@ schedule (Capability *initialCapability, Task *task)
}
// Note [shutdown]: The interruption / shutdown sequence.
+ // ~~~~~~~~~~~~~~~
//
// In order to cleanly shut down the runtime, we want to:
// * make sure that all main threads return to their callers
@@ -649,7 +650,7 @@ shouldYieldCapability (Capability *cap, Task *task, bool didGcLast)
// and this task it bound).
//
// Note [GC livelock]
- //
+ // ~~~~~~~~~~~~~~~~~~
// If we are interrupted to do a GC, then we do not immediately do
// another one. This avoids a starvation situation where one
// Capability keeps forcing a GC and the other Capabilities make no
diff --git a/rts/StablePtr.c b/rts/StablePtr.c
index 8f860d480c..ffd1d0775a 100644
--- a/rts/StablePtr.c
+++ b/rts/StablePtr.c
@@ -200,7 +200,7 @@ enlargeStablePtrTable(void)
}
/* Note [Enlarging the stable pointer table]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* To enlarge the stable pointer table, we allocate a new table, copy the
* existing entries, and then store the old version of the table in old_SPTs
* until we free it during GC. By not immediately freeing the old version
diff --git a/rts/Stats.c b/rts/Stats.c
index 7abe2f7417..ea57f60e70 100644
--- a/rts/Stats.c
+++ b/rts/Stats.c
@@ -764,8 +764,7 @@ StgInt TOTAL_CALLS=1;
/*
Note [RTS Stats Reporting]
-==========================
-
+~~~~~~~~~~~~~~~~~~~~~~~~~~
There are currently three reporting functions:
* report_summary:
Responsible for producing '+RTS -s' output.
@@ -948,7 +947,7 @@ static void report_summary(const RTSSummaryStats* sum)
sum->productivity_cpu_percent * 100,
sum->productivity_elapsed_percent * 100);
- // See Note [Internal Counter Stats] for a description of the
+ // See Note [Internal Counters Stats] for a description of the
// following counters. If you add a counter here, please remember
// to update the Note.
if (RtsFlags.MiscFlags.internalCounters) {
@@ -1474,7 +1473,7 @@ void stat_exit()
}
/* Note [Work Balance]
-----------------------
+~~~~~~~~~~~~~~~~~~~~~~
Work balance is a measure of how evenly the work done during parallel garbage
collection is spread across threads. To compute work balance we must take care
to account for the number of GC threads changing between GCs. The statistics we
@@ -1553,7 +1552,7 @@ See #13830
/*
Note [Internal Counters Stats]
------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What do the counts at the end of a '+RTS -s --internal-counters' report mean?
They are detailed below. Most of these counters are used by multiple threads
with no attempt at synchronisation. This means that reported values may be
diff --git a/rts/StgCRun.c b/rts/StgCRun.c
index 8e536ad6c2..7f5b6d169b 100644
--- a/rts/StgCRun.c
+++ b/rts/StgCRun.c
@@ -113,7 +113,6 @@ StgFunPtr StgReturn(void)
/*
* Note [Stack Alignment on X86]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* On X86 (both 32bit and 64bit) we keep the stack aligned on function calls at
* a 16-byte boundary. This is done because on a number of architectures the
* ABI requires this (x64, Mac OSX 32bit/64bit) as well as interfacing with
@@ -149,7 +148,7 @@ StgFunPtr StgReturn(void)
* for stg_stop_thread in StgStartup.cmm.
*
* Note [Windows Stack allocations]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* On windows the stack has to be allocated 4k at a time, otherwise
* we get a segfault. This is done by using a helper ___chkstk_ms that is
* provided by libgcc. The Haskell side already knows how to handle this
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index b78fca74cc..e9186e3500 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -193,7 +193,7 @@ INFO_TABLE_RET( stg_ctoi_V, RET_BCO )
}
/* Note [GHCi unboxed tuples stack spills]
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the calling convention for compiled code, a tuple is returned
in registers, with everything that doesn't fit spilled onto the STG
stack.
@@ -334,7 +334,7 @@ MK_STG_CTOI_T(62)
/*
Note [GHCi tuple layout]
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~
the tuple_info word describes the register and stack usage of the tuple:
[ ssss ssss rrrr rrrr rrrr rrrr rrrr rrrr ]
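[Editorial aside: a sketch of how a word with the layout pictured above could be unpacked, assuming the eight 's' bits give the tuple's stack usage and the twenty-four 'r' bits form a register bitmap; the helper names are illustrative, not the RTS's.]

    #include <stdint.h>

    /* Illustrative decoding of the tuple_info layout sketched above:
       high 8 bits = stack usage (assumed), low 24 bits = register bitmap (assumed). */
    static uint32_t tuple_stack_usage(uint32_t tuple_info) { return tuple_info >> 24; }
    static uint32_t tuple_reg_bitmap (uint32_t tuple_info) { return tuple_info & 0x00FFFFFFu; }
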
@@ -900,8 +900,8 @@ INFO_TABLE( stg_COMPACT_NFDATA_DIRTY, 0, 9, COMPACT_NFDATA, "COMPACT_NFDATA", "C
{ foreign "C" barf("COMPACT_NFDATA_DIRTY object (%p) entered!", R1) never returns; }
/* ----------------------------------------------------------------------------
- Note [CHARLIKE and INTLIKE closures.]
-
+ Note [CHARLIKE and INTLIKE closures]
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These are static representations of Chars and small Ints, so that
we can remove dynamic Chars and Ints during garbage collection and
replace them with references to the static objects.
diff --git a/rts/StgStdThunks.cmm b/rts/StgStdThunks.cmm
index 5239496be5..3c528f662f 100644
--- a/rts/StgStdThunks.cmm
+++ b/rts/StgStdThunks.cmm
@@ -53,7 +53,9 @@
* because LDV profiling relies on entering closures to mark them as
* "used".
*
- * Note [untag for prof]: when we enter a closure, the convention is
+ * Note [untag for prof]
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * When we enter a closure, the convention is
* that the closure pointer passed in the first argument is
* *untagged*. Without profiling we don't have to worry about this,
* because we never enter a tagged pointer.
diff --git a/rts/Task.h b/rts/Task.h
index 9b6a8e8d7b..fd7b68aecf 100644
--- a/rts/Task.h
+++ b/rts/Task.h
@@ -18,7 +18,6 @@
/*
Note [Definition of a Task]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
A task is an OSThread that runs Haskell code. Every OSThread that
runs inside the RTS, whether as a worker created by the RTS or via
an in-call from C to Haskell, has an associated Task. The first
@@ -35,7 +34,6 @@
Note [Ownership of Task]
~~~~~~~~~~~~~~~~~~~~~~~~
-
Task ownership is a little tricky. The default situation is that
the Task is an OS-thread-local structure that is owned by the OS
thread named in task->id. An OS thread not currently executing
diff --git a/rts/ThreadPaused.c b/rts/ThreadPaused.c
index c94b95afab..ffa1168a27 100644
--- a/rts/ThreadPaused.c
+++ b/rts/ThreadPaused.c
@@ -252,7 +252,7 @@ threadPaused(Capability *cap, StgTSO *tso)
retry:
#endif
// Note [suspend duplicate work]
- //
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// If the info table is a WHITEHOLE or a BLACKHOLE, then
// another thread has claimed it (via the SET_INFO()
// below), or is in the process of doing so. In that case
diff --git a/rts/Threads.c b/rts/Threads.c
index ab7af2e52c..1972f14895 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -570,7 +570,7 @@ threadStackOverflow (Capability *cap, StgTSO *tso)
stg_min(tso->stackobj->stack + tso->stackobj->stack_size,
tso->stackobj->sp+64)));
- // Note [Throw to self when masked], also #767 and #8303.
+ // See Note [Throw to self when masked], also #767 and #8303.
throwToSelf(cap, tso, (StgClosure *)stackOverflow_closure);
return;
}
diff --git a/rts/Timer.c b/rts/Timer.c
index ec3dff0a5c..e6666856a6 100644
--- a/rts/Timer.c
+++ b/rts/Timer.c
@@ -44,8 +44,7 @@ static int ticks_to_eventlog_flush = 0;
/*
Note [GC During Idle Time]
- --------------------------
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
In the threaded RTS, a major GC can be performed during idle time (i.e., when
no Haskell computations are ready to run). This can be beneficial for two
reasons. First, running the GC during idle time makes it less likely that a GC
diff --git a/rts/TraverseHeap.h b/rts/TraverseHeap.h
index 0bc553e094..2ac20e9cc5 100644
--- a/rts/TraverseHeap.h
+++ b/rts/TraverseHeap.h
@@ -97,7 +97,6 @@ typedef struct stackElement_ {
typedef struct traverseState_ {
/** Note [Profiling heap traversal visited bit]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* If the RTS is compiled with profiling enabled StgProfHeader can be used
* by profiling code to store per-heap object information. Specifically the
* 'hp_hdr' field is used to store heap profiling information.
diff --git a/rts/Updates.cmm b/rts/Updates.cmm
index d459607752..57839216c4 100644
--- a/rts/Updates.cmm
+++ b/rts/Updates.cmm
@@ -91,7 +91,7 @@ INFO_TABLE_RET ( stg_bh_upd_frame, UPDATE_FRAME,
}
/* Note [HpAlloc]
- *
+ * ~~~~~~~~~~~~~~
* HpAlloc is required to be zero unless we just bumped Hp and failed
* the heap check: see HeapStackCheck.cmm. Failures that result from
* HpAlloc being non-zero are very hard to track down, because they
diff --git a/rts/include/Stg.h b/rts/include/Stg.h
index 156c3a283c..be0995445b 100644
--- a/rts/include/Stg.h
+++ b/rts/include/Stg.h
@@ -268,8 +268,10 @@ typedef StgFunPtr F_;
/* foreign functions: */
#define EFF_(f) void f() /* See Note [External function prototypes] */
-/* Note [External function prototypes] See #8965, #11395
+/* Note [External function prototypes]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+(see #8965, #11395)
+
In generated C code we need to distinct between two types
of external symbols:
1. Cmm functions declared by 'EF_' macro (External Functions)
diff --git a/rts/include/rts/Flags.h b/rts/include/rts/Flags.h
index 2936876b7a..eb38e80794 100644
--- a/rts/include/rts/Flags.h
+++ b/rts/include/rts/Flags.h
@@ -22,7 +22,7 @@
/* For defaults, see the @initRtsFlagsDefaults@ routine. */
/* Note [Synchronization of flags and base APIs]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* We provide accessors to RTS flags in base. (GHC.RTS module)
* The API should be updated whenever RTS flags are modified.
*/
@@ -230,7 +230,7 @@ typedef struct _MISC_FLAGS {
memory management for non-GC related
tasks in the future, we'd respect it
there as well. */
- bool internalCounters; /* See Note [Internal Counter Stats] */
+ bool internalCounters; /* See Note [Internal Counters Stats] */
bool linkerAlwaysPic; /* Assume the object code is always PIC */
StgWord linkerMemBase; /* address to ask the OS for memory
* for the linker, NULL ==> off */
diff --git a/rts/include/rts/Libdw.h b/rts/include/rts/Libdw.h
index d7bd55d06e..7076611dfd 100644
--- a/rts/include/rts/Libdw.h
+++ b/rts/include/rts/Libdw.h
@@ -18,8 +18,8 @@
/*
* Note [Chunked stack representation]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* Consider the stack,
+ *
* main calls (bottom of stack)
* func1 which in turn calls
* func2 which calls
diff --git a/rts/include/rts/OSThreads.h b/rts/include/rts/OSThreads.h
index d24a1313a6..77241341b7 100644
--- a/rts/include/rts/OSThreads.h
+++ b/rts/include/rts/OSThreads.h
@@ -114,6 +114,7 @@ typedef DWORD ThreadLocalKey;
#define INIT_COND_VAR 0
/* Note [SRW locks]
+ ~~~~~~~~~~~~~~~~
We have a choice for implementing Mutexes on Windows. Standard
Mutexes are kernel objects that require kernel calls to
acquire/release, whereas CriticalSections are spin-locks that block
diff --git a/rts/include/rts/prof/CCS.h b/rts/include/rts/prof/CCS.h
index 7685f03003..a155e1385b 100644
--- a/rts/include/rts/prof/CCS.h
+++ b/rts/include/rts/prof/CCS.h
@@ -18,6 +18,7 @@
* ---------------------------------------------------------------------------*/
/*
* Note [struct alignment]
+ * ~~~~~~~~~~~~~~~~~~~~~~~
* NB. be careful to avoid unwanted padding between fields, by
* putting the 8-byte fields on an 8-byte boundary. Padding can
* vary between C compilers, and we don't take into account any
diff --git a/rts/include/rts/storage/Block.h b/rts/include/rts/storage/Block.h
index 730947e375..141ec777be 100644
--- a/rts/include/rts/storage/Block.h
+++ b/rts/include/rts/storage/Block.h
@@ -28,7 +28,7 @@
#define BLOCK_SIZE (1<<BLOCK_SHIFT)
#else
#define BLOCK_SIZE (UNIT<<BLOCK_SHIFT)
-// Note [integer overflow]
+// See Note [integer overflow]
#endif
#define BLOCK_SIZE_W (BLOCK_SIZE/sizeof(W_))
@@ -43,7 +43,7 @@
#define MBLOCK_SIZE (1<<MBLOCK_SHIFT)
#else
#define MBLOCK_SIZE (UNIT<<MBLOCK_SHIFT)
-// Note [integer overflow]
+// See Note [integer overflow]
#endif
#define MBLOCK_SIZE_W (MBLOCK_SIZE/sizeof(W_))
@@ -60,7 +60,7 @@
/*
* Note [integer overflow]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~
* The UL suffix in BLOCK_SIZE and MBLOCK_SIZE promotes the expression
* to an unsigned long, which means that expressions involving these
* will be promoted to unsigned long, which makes integer overflow
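[Editorial aside: a small illustration of the promotion the note describes; the shift value 12 is only an example and the names are not from the RTS.]

    /* With an int-typed constant the multiplication happens at (unsigned) int
       width and can wrap before being widened; with a UL constant the other
       operand is promoted and the arithmetic happens in unsigned long. */
    #define EX_BLOCK_SIZE_INT (1 << 12)    /* type int */
    #define EX_BLOCK_SIZE_UL  (1UL << 12)  /* type unsigned long */

    unsigned long bytes_wrapping(unsigned int n) { return n * EX_BLOCK_SIZE_INT; }
    unsigned long bytes_correct (unsigned int n) { return n * EX_BLOCK_SIZE_UL;  }
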
diff --git a/rts/include/rts/storage/ClosureMacros.h b/rts/include/rts/storage/ClosureMacros.h
index b841ef8be0..393bee3a9f 100644
--- a/rts/include/rts/storage/ClosureMacros.h
+++ b/rts/include/rts/storage/ClosureMacros.h
@@ -480,10 +480,11 @@ INLINE_HEADER StgWord8 *mutArrPtrsCard (StgMutArrPtrs *a, W_ n)
Replacing a closure with a different one. We must call
OVERWRITING_CLOSURE(p) on the old closure that is about to be
overwritten.
+ */
+ /*
Note [zeroing slop when overwriting closures]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
When we overwrite a closure in the heap with a smaller one, in some scenarios
we need to write zero words into "slop"; the memory that is left
unoccupied. See Note [slop on the heap]
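[Editorial aside: a minimal sketch of what "writing zero words into slop" amounts to, assuming the old closure occupied old_size words and its replacement new_size words; this is the idea only, not the RTS's OVERWRITING_CLOSURE macro.]

    #include "Rts.h"

    /* Sketch only: zero the trailing words left unoccupied when a closure is
       overwritten in place by a smaller one. */
    static void zero_slop_sketch(StgClosure *p, uint32_t old_size, uint32_t new_size)
    {
        StgWord *words = (StgWord *)p;
        for (uint32_t i = new_size; i < old_size; i++) {
            words[i] = 0;
        }
    }
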
diff --git a/rts/include/rts/storage/Closures.h b/rts/include/rts/storage/Closures.h
index b28315f76a..44188f3394 100644
--- a/rts/include/rts/storage/Closures.h
+++ b/rts/include/rts/storage/Closures.h
@@ -491,6 +491,6 @@ typedef struct StgCompactNFData_ {
// Used temporarily to store the result of compaction. Doesn't need to be
// a GC root.
struct StgCompactNFData_ *link;
- // Used by compacting GC for linking CNFs with threaded hash tables. See
- // Note [CNFs in compacting GC] in Compact.c for details.
+ // Used by compacting GC for linking CNFs with threaded hash tables.
+ // See Note [CNFs in compacting GC] in Compact.c for details.
} StgCompactNFData;
diff --git a/rts/include/rts/storage/InfoTables.h b/rts/include/rts/storage/InfoTables.h
index 55d9ad6542..55aba6b4d7 100644
--- a/rts/include/rts/storage/InfoTables.h
+++ b/rts/include/rts/storage/InfoTables.h
@@ -235,7 +235,6 @@ typedef struct StgInfoTable_ {
/*
* Note [Encoding static reference tables]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
* As static reference tables appear frequently in code, we use a special
* compact encoding for the common case of a module defining only a few CAFs: We
* produce one table containing a list of CAFs in the module and then include a
diff --git a/rts/include/rts/storage/TSO.h b/rts/include/rts/storage/TSO.h
index 874d61ab60..d21cd7a645 100644
--- a/rts/include/rts/storage/TSO.h
+++ b/rts/include/rts/storage/TSO.h
@@ -191,10 +191,9 @@ typedef struct StgTSO_ {
/* Note [StgStack dirtiness flags and concurrent marking]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Without concurrent collection by the nonmoving collector the stack dirtiness story
- * is quite simple: The stack is either STACK_DIRTY (meaning it has been added to mut_list)
- * or not.
+ * Without concurrent collection by the nonmoving collector the stack dirtiness
+ * story is quite simple: The stack is either STACK_DIRTY (meaning it has been
+ * added to mut_list) or not.
*
* However, things are considerably more complicated with concurrent collection
* (namely, when nonmoving_write_barrier_enabled is set): In addition to adding
diff --git a/rts/include/stg/SMP.h b/rts/include/stg/SMP.h
index a1a714f4c9..f672009c76 100644
--- a/rts/include/stg/SMP.h
+++ b/rts/include/stg/SMP.h
@@ -107,7 +107,6 @@ EXTERN_INLINE void load_load_barrier(void);
/*
* Note [Heap memory barriers]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* Machines with weak memory ordering semantics have consequences for how
* closures are observed and mutated. For example, consider a thunk that needs
* to be updated to an indirection. In order for the indirection to be safe for
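[Editorial aside: a hedged sketch of the ordering the note is about when a thunk is overwritten by an indirection: the payload must be published before the info pointer, or another capability could see an uninitialised indirectee. The function name is illustrative and the RTS's real update code is more involved.]

    #include "Rts.h"

    /* Sketch only: order the stores so the indirectee is visible before the
       new info pointer becomes visible. */
    static void update_with_indirection_sketch(StgClosure *thunk, StgClosure *result)
    {
        ((StgInd *)thunk)->indirectee = result;   /* 1. publish the payload         */
        write_barrier();                          /* 2. store-store barrier (SMP.h) */
        SET_INFO(thunk, &stg_BLACKHOLE_info);     /* 3. only then flip the info ptr */
    }
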
diff --git a/rts/linker/Elf.c b/rts/linker/Elf.c
index c5d009639e..76145dbbf4 100644
--- a/rts/linker/Elf.c
+++ b/rts/linker/Elf.c
@@ -110,9 +110,8 @@
#endif
/*
-
Note [Many ELF Sections]
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~
The normal section number fields in ELF are limited to 16 bits, which runs
out of bits when you try to cram in more sections than that.
@@ -1245,6 +1244,7 @@ do_Elf_Rel_relocations ( ObjectCode* oc, char* ehdrC,
if(needs_veneer) { /* overflow or thum interworking */
// Note [PC bias]
+ // ~~~~~~~~~~~~~~
// From the ELF for the ARM Architecture documentation:
// > 4.6.1.1 Addends and PC-bias compensation
// > A binary file may use REL or RELA relocations or a mixture
diff --git a/rts/linker/LoadArchive.c b/rts/linker/LoadArchive.c
index 041ebef4b6..ff8630d57e 100644
--- a/rts/linker/LoadArchive.c
+++ b/rts/linker/LoadArchive.c
@@ -468,6 +468,7 @@ static HsInt loadArchive_ (pathchar *path)
#if defined(OBJFORMAT_PEi386)
/*
* Note [MSVC import files (ext .lib)]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* MSVC compilers store the object files in
* the import libraries with extension .dll
* so on Windows we should look for those too.
diff --git a/rts/linker/M32Alloc.c b/rts/linker/M32Alloc.c
index cd8751b3b0..69613d8d7c 100644
--- a/rts/linker/M32Alloc.c
+++ b/rts/linker/M32Alloc.c
@@ -18,10 +18,8 @@
#include <stdio.h>
/*
-
Note [Compile Time Trickery]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
This file implements two versions of each of the `m32_*` functions. At the top
of the file there is the real implementation (compiled in when
`NEED_M32` is true) and a dummy implementation that exists only to
@@ -45,10 +43,8 @@ still check the call for syntax and correct function parameter types.
#if defined(NEED_M32)
/*
-
Note [M32 Allocator]
~~~~~~~~~~~~~~~~~~~~
-
A memory allocator that allocates only pages in the 32-bit range (lower 2GB).
This is useful on 64-bit platforms to ensure that addresses of allocated
objects can be referenced with a 32-bit relative offset.
diff --git a/rts/linker/PEi386.c b/rts/linker/PEi386.c
index f186da0af8..011e47a21b 100644
--- a/rts/linker/PEi386.c
+++ b/rts/linker/PEi386.c
@@ -59,7 +59,6 @@
Note [BFD import library]
~~~~~~~~~~~~~~~~~~~~~~~~~
-
On Windows, compilers don't link directly to dynamic libraries.
The reason for this is that the exports are not always by symbol, the
Import Address Table (IAT) also allows exports by ordinal number
@@ -128,7 +127,6 @@
Note [Memory allocation]
~~~~~~~~~~~~~~~~~~~~~~~~
-
Previously on Windows we would use VirtualAlloc to allocate enough space for
loading the entire object file into memory and keep it there for the duration
until the entire object file has been unloaded.
@@ -166,7 +164,6 @@
Note [Section alignment]
~~~~~~~~~~~~~~~~~~~~~~~~
-
The Windows linker aligns memory to it's section alignment requirement by
aligning it during the copying to the private heap. We also ensure that the
trampoline "region" we reserve is 8 bytes aligned.
@@ -1996,7 +1993,7 @@ ocResolve_PEi386 ( ObjectCode* oc )
/*
Note [ELF constant in PE file]
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For some reason, the PE files produced by GHC contain a linux
relocation constant 17 (0x11) in the object files. As far as I (Phyx-) can tell
this constant doesn't seem like it's coming from GHC, or at least I could not find
diff --git a/rts/linker/PEi386.h b/rts/linker/PEi386.h
index 4c33dfd4d9..8e6e844efb 100644
--- a/rts/linker/PEi386.h
+++ b/rts/linker/PEi386.h
@@ -158,7 +158,7 @@ uint8_t* getSymShortName ( COFF_HEADER_INFO *info, COFF_symbol* sym );
/*
Note [mingw-w64 name decoration scheme]
-
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What's going on with name decoration? Well, original code
have some crufty and ad-hocish paths related mostly to very old
mingw gcc/binutils/runtime combinations. Now mingw-w64 offers pretty
diff --git a/rts/linker/elf_plt_arm.c b/rts/linker/elf_plt_arm.c
index bd21243ec4..5b67bf8ac4 100644
--- a/rts/linker/elf_plt_arm.c
+++ b/rts/linker/elf_plt_arm.c
@@ -58,7 +58,6 @@ bool makeStubArmThm(Stub * s);
/*
Note [The ARM/Thumb Story]
~~~~~~~~~~~~~~~~~~~~~~~~~~
-
Support for the ARM architecture is complicated by the fact that ARM has not
one but several instruction encodings. The two relevant ones here are the
original ARM encoding and Thumb, a more dense variant of ARM supporting only
diff --git a/rts/linker/elf_reloc_aarch64.c b/rts/linker/elf_reloc_aarch64.c
index d8c4f8b724..790378ab0e 100644
--- a/rts/linker/elf_reloc_aarch64.c
+++ b/rts/linker/elf_reloc_aarch64.c
@@ -229,6 +229,7 @@ computeAddend(Section * section, Elf_Rel * rel,
/* note: we are encoding bits [27:2] */
if(!isInt64(26+2, V)) {
// Note [PC bias aarch64]
+ // ~~~~~~~~~~~~~~~~~~~~~~
// There is no PC bias to accommodate in the
// relocation of a place containing an instruction
// that formulates a PC-relative address. The program
diff --git a/rts/linker/elf_tlsgd.c b/rts/linker/elf_tlsgd.c
index ec42e29ac6..a22ed0b731 100644
--- a/rts/linker/elf_tlsgd.c
+++ b/rts/linker/elf_tlsgd.c
@@ -4,7 +4,7 @@
/*
* Note [TLSGD relocation]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~
* Quick background: FreeBSD's <ctype.h> is poisoned with static inline code
* that gets compiled into every program that uses functions like isdigit(3).
* When compiled "-c -fpic" for inclusion in position-independent ".a" files
diff --git a/rts/posix/OSMem.c b/rts/posix/OSMem.c
index fff2f1e590..822546d5d1 100644
--- a/rts/posix/OSMem.c
+++ b/rts/posix/OSMem.c
@@ -652,7 +652,7 @@ void osCommitMemory(void *at, W_ size)
}
/* Note [MADV_FREE and MADV_DONTNEED]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* madvise() provides flags with which one can release no longer needed pages
* back to the kernel without having to munmap() (which is expensive).
*
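[Editorial aside: a small sketch of the madvise() usage the note contrasts; the fallback policy shown is illustrative, not necessarily the RTS's.]

    #include <stddef.h>
    #include <sys/mman.h>

    /* Sketch: return no-longer-needed pages to the kernel without unmapping.
       MADV_FREE (where available) lets the kernel reclaim lazily, while
       MADV_DONTNEED drops the contents immediately. */
    static void release_pages_sketch(void *at, size_t size)
    {
    #if defined(MADV_FREE)
        if (madvise(at, size, MADV_FREE) == 0)
            return;
    #endif
        madvise(at, size, MADV_DONTNEED);
    }
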
diff --git a/rts/sm/CNF.c b/rts/sm/CNF.c
index a6bd3b69f0..1f40402c63 100644
--- a/rts/sm/CNF.c
+++ b/rts/sm/CNF.c
@@ -36,7 +36,6 @@
/*
Note [Compact Normal Forms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
A compact normal form (CNF) is a region of memory containing one or more
Haskell data structures. The goals are:
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 0e0e887b1e..834df459b4 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -43,7 +43,6 @@
/* Note [Selector optimisation depth limit]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* MAX_THUNK_SELECTOR_DEPTH is used to avoid long recursion of
* eval_thunk_selector due to nested selector thunks. Note that this *only*
* counts nested selector thunks, e.g. `fst (fst (... (fst x)))`. The collector
@@ -174,7 +173,6 @@ alloc_for_copy (uint32_t size, uint32_t gen_no)
/*
* Note [Non-moving GC: Marking evacuated objects]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* When the non-moving collector is in use we must be careful to ensure that any
* references to objects in the non-moving generation from younger generations
* are pushed to the mark queue.
@@ -695,7 +693,7 @@ loop:
if (!HEAP_ALLOCED_GC(q)) {
if (!major_gc) return;
- // Note [Object unloading] in CheckUnload.c
+ // See Note [Object unloading] in CheckUnload.c
if (RTS_UNLIKELY(unload_mark_needed)) {
markObjectCode(q);
}
@@ -933,7 +931,7 @@ loop:
return;
}
// Note [BLACKHOLE pointing to IND]
- //
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// BLOCKING_QUEUE can be overwritten by IND (see
// wakeBlockingQueue()). However, when this happens we must
// be updating the BLACKHOLE, so the BLACKHOLE's indirectee
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 64d0924059..15aef3a9fc 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -158,6 +158,7 @@ StgWord8 the_gc_thread[sizeof(gc_thread) + 64 * sizeof(gen_workspace)]
#endif // THREADED_RTS
/* Note [n_gc_threads]
+ ~~~~~~~~~~~~~~~~~~~
This is a global variable that originally tracked the number of threads
participating in the current gc. It's meaning has diverged from this somewhat,
as it does not distinguish betweeen idle and non-idle threads. An idle thread
@@ -2197,7 +2198,7 @@ bool doIdleGCWork(Capability *cap STG_UNUSED, bool all)
/* Note [Synchronising work stealing]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* During parallel garbage collections, idle gc threads will steal work from
* other threads. If they see no work to steal then they will wait on a
* condition variabl(gc_running_cv).
@@ -2243,6 +2244,7 @@ bool doIdleGCWork(Capability *cap STG_UNUSED, bool all)
* */
/* Note [Scaling retained memory]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Tickets: #19381 #19359 #14702
*
* After a spike in memory usage we have been conservative about returning
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index 627c95fb42..9d57bf7d9e 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -203,7 +203,7 @@ push_todo_block(bdescr *bd, gen_workspace *ws)
}
/* Note [big objects]
-
+ ~~~~~~~~~~~~~~~~~~
We can get an ordinary object (CONSTR, FUN, THUNK etc.) that is
larger than a block (see #7919). Let's call these "big objects".
These objects don't behave like large objects - they live in
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index dd019ec18b..a918f422cf 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -229,7 +229,7 @@ Mutex concurrent_coll_finished_lock;
* - Note [StgStack dirtiness flags and concurrent marking] (TSO.h) describes
* the protocol for concurrent marking of stacks.
*
- * - Note [Nonmoving write barrier in Perform{Take,Put}] (PrimOps.cmm) describes
+ * - Note [Nonmoving write barrier in Perform{Put,Take}] (PrimOps.cmm) describes
* a tricky barrier necessary when resuming threads blocked on MVar
* operations.
*
@@ -328,8 +328,8 @@ Mutex concurrent_coll_finished_lock;
* The implementation details of this are described in Note [Non-moving GC:
* Marking evacuated objects] in Evac.c.
*
- * Note [Deadlock detection under the non-moving collector]
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Note [Deadlock detection under nonmoving collector]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* In GHC the garbage collector is responsible for identifying deadlocked
* programs. Providing for this responsibility is slightly tricky in the
* non-moving collector due to the existence of aging. In particular, the
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index 2fd85dc4f0..87b8f774bd 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -159,7 +159,6 @@ StgIndStatic *debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
*
* Note [Eager update remembered set flushing]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* We eagerly flush update remembered sets during minor GCs to avoid scenarios
* like the following which could result in long sync pauses:
*
@@ -199,7 +198,6 @@ StgIndStatic *debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
*
* Note [Concurrent read barrier on deRefWeak#]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* In general the non-moving GC assumes that all pointers reachable from a
* marked object are themselves marked (or in the mark queue). However,
* weak pointers are an obvious exception to this rule. In particular,
@@ -596,7 +594,7 @@ inline void updateRemembSetPushThunk(Capability *cap, StgThunk *thunk)
* we update the indirectee to ensure that the thunk's free variables remain
* visible to the concurrent collector.
*
- * See Note [Update rememembered set].
+ * See Note [Update remembered set].
*/
void updateRemembSetPushThunkEager(Capability *cap,
const StgThunkInfoTable *info,
diff --git a/rts/sm/NonMovingScav.c b/rts/sm/NonMovingScav.c
index 4fcbc5881c..56ebe5ffe4 100644
--- a/rts/sm/NonMovingScav.c
+++ b/rts/sm/NonMovingScav.c
@@ -32,7 +32,7 @@ nonmovingScavengeOne (StgClosure *q)
if (gct->failed_to_evac) {
mvar->header.info = &stg_MVAR_DIRTY_info;
- // Note [Dirty flags in the non-moving collector] in NonMoving.c
+ // See Note [Dirty flags in the non-moving collector] in NonMoving.c
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) mvar->head);
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) mvar->tail);
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) mvar->value);
@@ -52,7 +52,7 @@ nonmovingScavengeOne (StgClosure *q)
if (gct->failed_to_evac) {
tvar->header.info = &stg_TVAR_DIRTY_info;
- // Note [Dirty flags in the non-moving collector] in NonMoving.c
+ // See Note [Dirty flags in the non-moving collector] in NonMoving.c
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) tvar->current_value);
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) tvar->first_watch_queue_entry);
} else {
@@ -177,7 +177,7 @@ nonmovingScavengeOne (StgClosure *q)
if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
- // Note [Dirty flags in the non-moving collector] in NonMoving.c
+ // See Note [Dirty flags in the non-moving collector] in NonMoving.c
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) mv->var);
} else {
((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
diff --git a/rts/sm/NonMovingSweep.c b/rts/sm/NonMovingSweep.c
index 1a7c97b7e6..5c4752d4a3 100644
--- a/rts/sm/NonMovingSweep.c
+++ b/rts/sm/NonMovingSweep.c
@@ -370,7 +370,6 @@ void nonmovingSweepStableNameTable()
/* Note [Sweeping stable names in the concurrent collector]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* When collecting concurrently we need to take care to avoid freeing
* stable names the we didn't sweep this collection cycle. For instance,
* consider the following situation:
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index cf4e2dfea6..9c2ccc2c41 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -909,7 +909,6 @@ static void checkGeneration (generation *gen,
#if defined(THREADED_RTS)
// Note [heap sanity checking with SMP]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- //
// heap sanity checking doesn't work with SMP for two reasons:
//
// * We can't zero the slop. However, we can sanity-check the heap after a
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index a36ebbb331..b121c010ca 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -1858,7 +1858,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
case UPDATE_FRAME:
// Note [upd-black-hole]
- //
+ // ~~~~~~~~~~~~~~~~~~~~~
// In SMP, we can get update frames that point to indirections
// when two threads evaluate the same thunk. We do attempt to
// discover this situation in threadPaused(), but it's
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index ede47d3eb2..c592595737 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -399,7 +399,6 @@ void listAllBlocks (ListBlocksCb cb, void *user)
/* -----------------------------------------------------------------------------
Note [CAF management]
~~~~~~~~~~~~~~~~~~~~~
-
The entry code for every CAF does the following:
- calls newCAF, which builds a CAF_BLACKHOLE on the heap and atomically
@@ -434,7 +433,6 @@ void listAllBlocks (ListBlocksCb cb, void *user)
------------------
Note [atomic CAF entry]
~~~~~~~~~~~~~~~~~~~~~~~
-
With THREADED_RTS, newCAF() is required to be atomic (see
#5558). This is because if two threads happened to enter the same
CAF simultaneously, they would create two distinct CAF_BLACKHOLEs,
@@ -448,7 +446,6 @@ void listAllBlocks (ListBlocksCb cb, void *user)
------------------
Note [GHCi CAFs]
~~~~~~~~~~~~~~~~
-
For GHCI, we have additional requirements when dealing with CAFs:
- we must *retain* all dynamically-loaded CAFs ever entered,
@@ -470,7 +467,6 @@ void listAllBlocks (ListBlocksCb cb, void *user)
------------------
Note [Static objects under the nonmoving collector]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
Static object management is a bit tricky under the nonmoving collector as we
need to maintain a bit more state than in the moving collector. In
particular, the moving collector uses the low bits of the STATIC_LINK field
@@ -597,6 +593,7 @@ newCAF(StgRegTable *reg, StgIndStatic *caf)
if(keepCAFs && !(highMemDynamic && (void*) caf > (void*) 0x80000000))
{
// Note [dyn_caf_list]
+ // ~~~~~~~~~~~~~~~~~~~
// If we are in GHCi _and_ we are using dynamic libraries,
// then we can't redirect newCAF calls to newRetainedCAF (see below),
// so we make newCAF behave almost like newRetainedCAF.
@@ -990,7 +987,6 @@ accountAllocation(Capability *cap, W_ n)
/* Note [slop on the heap]
* ~~~~~~~~~~~~~~~~~~~~~~~
- *
* We use the term "slop" to refer to allocated memory on the heap which isn't
* occupied by any closure. Usually closures are packet tightly into the heap
* blocks, storage for one immediately following another. However there are
@@ -1549,7 +1545,7 @@ dirty_MVAR(StgRegTable *reg, StgClosure *p, StgClosure *old_val)
/* -----------------------------------------------------------------------------
* Note [allocation accounting]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* - When cap->r.rCurrentNusery moves to a new block in the nursery,
* we add the size of the used portion of the previous block to
* cap->total_allocated. (see finishedNurseryBlock())
@@ -1825,7 +1821,6 @@ _bdescr (StgPtr p)
/*
Note [Sources of Block Level Fragmentation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
Block level fragmentation is when there is unused space in megablocks.
The amount of fragmentation can be calculated as the difference between the
total size of allocated blocks and the total size of allocated megablocks.
diff --git a/rts/sm/Storage.h b/rts/sm/Storage.h
index 48ddcf35f5..00f2943a51 100644
--- a/rts/sm/Storage.h
+++ b/rts/sm/Storage.h
@@ -82,7 +82,7 @@ bool doYouWantToGC(Capability *cap)
/* -----------------------------------------------------------------------------
Allocation accounting
- See [Note allocation accounting] in Storage.c
+ See Note [allocation accounting] in Storage.c
-------------------------------------------------------------------------- */
//
@@ -126,7 +126,7 @@ void move_STACK (StgStack *src, StgStack *dest);
/* -----------------------------------------------------------------------------
Note [STATIC_LINK fields]
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
The low 2 bits of the static link field have the following meaning:
00 we haven't seen this static object before
@@ -175,7 +175,7 @@ extern uint32_t prev_static_flag, static_flag;
/* -----------------------------------------------------------------------------
Note [CAF lists]
-
+ ~~~~~~~~~~~~~~~~
dyn_caf_list (CAFs chained through static_link)
This is a chain of all CAFs in the program, used for
dynamically-linked GHCi.
diff --git a/rts/win32/OSMem.c b/rts/win32/OSMem.c
index dde1a74bbb..c192fb5923 100644
--- a/rts/win32/OSMem.c
+++ b/rts/win32/OSMem.c
@@ -547,6 +547,7 @@ void osBindMBlocksToNode(
void* temp;
if (RtsFlags.GcFlags.numa) {
/* Note [base memory]
+ ~~~~~~~~~~~~~~~~~~
I would like to use addr here to specify the base
memory of allocation. The problem is that the address
we are requesting is too high. I can't figure out if it's