Diffstat (limited to 'rts/sm')

 rts/sm/CNF.c            | 1
 rts/sm/Evac.c           | 6
 rts/sm/GC.c             | 4
 rts/sm/GCUtils.c        | 2
 rts/sm/NonMoving.c      | 6
 rts/sm/NonMovingMark.c  | 4
 rts/sm/NonMovingScav.c  | 6
 rts/sm/NonMovingSweep.c | 1
 rts/sm/Sanity.c         | 1
 rts/sm/Scav.c           | 2
 rts/sm/Storage.c        | 9
 rts/sm/Storage.h        | 6
 12 files changed, 19 insertions(+), 29 deletions(-)
diff --git a/rts/sm/CNF.c b/rts/sm/CNF.c
index a6bd3b69f0..1f40402c63 100644
--- a/rts/sm/CNF.c
+++ b/rts/sm/CNF.c
@@ -36,7 +36,6 @@
/*
Note [Compact Normal Forms]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
A compact normal form (CNF) is a region of memory containing one or more
Haskell data structures. The goals are:
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 0e0e887b1e..834df459b4 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -43,7 +43,6 @@
/* Note [Selector optimisation depth limit]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* MAX_THUNK_SELECTOR_DEPTH is used to avoid long recursion of
* eval_thunk_selector due to nested selector thunks. Note that this *only*
* counts nested selector thunks, e.g. `fst (fst (... (fst x)))`. The collector
@@ -174,7 +173,6 @@ alloc_for_copy (uint32_t size, uint32_t gen_no)
/*
* Note [Non-moving GC: Marking evacuated objects]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* When the non-moving collector is in use we must be careful to ensure that any
* references to objects in the non-moving generation from younger generations
* are pushed to the mark queue.
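A minimal sketch of the invariant this note states, assuming RTS headers; of the names below, only markQueuePushClosureGC and the upd_rem_set queue are real (their use appears in the NonMovingScav.c hunks further down), while copy_to_nonmoving_heap is a hypothetical stand-in for the real evacuation path:

    /* Sketch only. The point is the ordering: the copied closure must
     * reach the mark queue, otherwise a concurrently running mark
     * could miss it and reclaim a live object. */
    static void
    evacuate_into_nonmoving_sketch (Capability *cap, StgClosure *q)
    {
        StgClosure *to = copy_to_nonmoving_heap(q);   /* hypothetical */
        markQueuePushClosureGC(&cap->upd_rem_set.queue, to);
    }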
@@ -695,7 +693,7 @@ loop:
if (!HEAP_ALLOCED_GC(q)) {
if (!major_gc) return;
- // Note [Object unloading] in CheckUnload.c
+ // See Note [Object unloading] in CheckUnload.c
if (RTS_UNLIKELY(unload_mark_needed)) {
markObjectCode(q);
}
@@ -933,7 +931,7 @@ loop:
return;
}
// Note [BLACKHOLE pointing to IND]
- //
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// BLOCKING_QUEUE can be overwritten by IND (see
// wakeBlockingQueue()). However, when this happens we must
// be updating the BLACKHOLE, so the BLACKHOLE's indirectee
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 64d0924059..15aef3a9fc 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -158,6 +158,7 @@ StgWord8 the_gc_thread[sizeof(gc_thread) + 64 * sizeof(gen_workspace)]
#endif // THREADED_RTS
/* Note [n_gc_threads]
+ ~~~~~~~~~~~~~~~~~~~
This is a global variable that originally tracked the number of threads
participating in the current gc. Its meaning has diverged from this somewhat,
as it does not distinguish between idle and non-idle threads. An idle thread
@@ -2197,7 +2198,7 @@ bool doIdleGCWork(Capability *cap STG_UNUSED, bool all)
/* Note [Synchronising work stealing]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* During parallel garbage collections, idle gc threads will steal work from
* other threads. If they see no work to steal then they will wait on a
* condition variable (gc_running_cv).
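The waiting side of that protocol is the standard condition-variable pattern. A self-contained pthreads sketch (the RTS uses its own Mutex/Condition wrappers, and names like work_available are illustrative rather than real RTS state):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t gc_lock       = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  gc_running_cv = PTHREAD_COND_INITIALIZER;
    static bool work_available = false;
    static bool gc_finished    = false;

    static void idle_gc_worker (void)
    {
        pthread_mutex_lock(&gc_lock);
        /* Re-check the predicate in a loop: condition variables allow
         * spurious wakeups, and another thread may have taken the work
         * between the signal and this thread resuming. */
        while (!work_available && !gc_finished) {
            pthread_cond_wait(&gc_running_cv, &gc_lock);
        }
        pthread_mutex_unlock(&gc_lock);
    }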
@@ -2243,6 +2244,7 @@ bool doIdleGCWork(Capability *cap STG_UNUSED, bool all)
* */
/* Note [Scaling retained memory]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Tickets: #19381 #19359 #14702
*
* After a spike in memory usage we have been conservative about returning
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index 627c95fb42..9d57bf7d9e 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -203,7 +203,7 @@ push_todo_block(bdescr *bd, gen_workspace *ws)
}
/* Note [big objects]
-
+ ~~~~~~~~~~~~~~~~~~
We can get an ordinary object (CONSTR, FUN, THUNK etc.) that is
larger than a block (see #7919). Let's call these "big objects".
These objects don't behave like large objects - they live in
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index dd019ec18b..a918f422cf 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -229,7 +229,7 @@ Mutex concurrent_coll_finished_lock;
* - Note [StgStack dirtiness flags and concurrent marking] (TSO.h) describes
* the protocol for concurrent marking of stacks.
*
- * - Note [Nonmoving write barrier in Perform{Take,Put}] (PrimOps.cmm) describes
+ * - Note [Nonmoving write barrier in Perform{Put,Take}] (PrimOps.cmm) describes
* a tricky barrier necessary when resuming threads blocked on MVar
* operations.
*
@@ -328,8 +328,8 @@ Mutex concurrent_coll_finished_lock;
* The implementation details of this are described in Note [Non-moving GC:
* Marking evacuated objects] in Evac.c.
*
- * Note [Deadlock detection under the non-moving collector]
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Note [Deadlock detection under nonmoving collector]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* In GHC the garbage collector is responsible for identifying deadlocked
* programs. Providing for this responsibility is slightly tricky in the
* non-moving collector due to the existence of aging. In particular, the
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index 2fd85dc4f0..87b8f774bd 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -159,7 +159,6 @@ StgIndStatic *debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
*
* Note [Eager update remembered set flushing]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* We eagerly flush update remembered sets during minor GCs to avoid scenarios
* like the following which could result in long sync pauses:
*
@@ -199,7 +198,6 @@ StgIndStatic *debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
*
* Note [Concurrent read barrier on deRefWeak#]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* In general the non-moving GC assumes that all pointers reachable from a
* marked object are themselves marked (or in the mark queue). However,
* weak pointers are an obvious exception to this rule. In particular,
@@ -596,7 +594,7 @@ inline void updateRemembSetPushThunk(Capability *cap, StgThunk *thunk)
* we update the indirectee to ensure that the thunk's free variables remain
* visible to the concurrent collector.
*
- * See Note [Update rememembered set].
+ * See Note [Update remembered set].
*/
void updateRemembSetPushThunkEager(Capability *cap,
const StgThunkInfoTable *info,
diff --git a/rts/sm/NonMovingScav.c b/rts/sm/NonMovingScav.c
index 4fcbc5881c..56ebe5ffe4 100644
--- a/rts/sm/NonMovingScav.c
+++ b/rts/sm/NonMovingScav.c
@@ -32,7 +32,7 @@ nonmovingScavengeOne (StgClosure *q)
if (gct->failed_to_evac) {
mvar->header.info = &stg_MVAR_DIRTY_info;
- // Note [Dirty flags in the non-moving collector] in NonMoving.c
+ // See Note [Dirty flags in the non-moving collector] in NonMoving.c
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) mvar->head);
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) mvar->tail);
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) mvar->value);
@@ -52,7 +52,7 @@ nonmovingScavengeOne (StgClosure *q)
if (gct->failed_to_evac) {
tvar->header.info = &stg_TVAR_DIRTY_info;
- // Note [Dirty flags in the non-moving collector] in NonMoving.c
+ // See Note [Dirty flags in the non-moving collector] in NonMoving.c
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) tvar->current_value);
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) tvar->first_watch_queue_entry);
} else {
@@ -177,7 +177,7 @@ nonmovingScavengeOne (StgClosure *q)
if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
- // Note [Dirty flags in the non-moving collector] in NonMoving.c
+ // See Note [Dirty flags in the non-moving collector] in NonMoving.c
markQueuePushClosureGC(&gct->cap->upd_rem_set.queue, (StgClosure *) mv->var);
} else {
((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
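The three hunks above instantiate one pattern, generalised in the hedged sketch below; only markQueuePushClosureGC, gct, and the upd_rem_set queue are real, while the dirty_info_for/clean_info_for/n_fields/field helpers are hypothetical:

    /* If any field failed to evacuate, keep the object DIRTY and push
     * every field to the update remembered set so the concurrent mark
     * still traces them; otherwise the object may be marked CLEAN. */
    if (gct->failed_to_evac) {
        obj->header.info = dirty_info_for(obj);           /* hypothetical */
        for (uint32_t i = 0; i < n_fields(obj); i++) {    /* hypothetical */
            markQueuePushClosureGC(&gct->cap->upd_rem_set.queue,
                                   field(obj, i));        /* hypothetical */
        }
    } else {
        obj->header.info = clean_info_for(obj);           /* hypothetical */
    }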
diff --git a/rts/sm/NonMovingSweep.c b/rts/sm/NonMovingSweep.c
index 1a7c97b7e6..5c4752d4a3 100644
--- a/rts/sm/NonMovingSweep.c
+++ b/rts/sm/NonMovingSweep.c
@@ -370,7 +370,6 @@ void nonmovingSweepStableNameTable()
/* Note [Sweeping stable names in the concurrent collector]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* When collecting concurrently we need to take care to avoid freeing
 * stable names that we didn't sweep this collection cycle. For instance,
* consider the following situation:
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index cf4e2dfea6..9c2ccc2c41 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -909,7 +909,6 @@ static void checkGeneration (generation *gen,
#if defined(THREADED_RTS)
// Note [heap sanity checking with SMP]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- //
// heap sanity checking doesn't work with SMP for two reasons:
//
// * We can't zero the slop. However, we can sanity-check the heap after a
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index a36ebbb331..b121c010ca 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -1858,7 +1858,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
case UPDATE_FRAME:
// Note [upd-black-hole]
- //
+ // ~~~~~~~~~~~~~~~~~~~~~
// In SMP, we can get update frames that point to indirections
// when two threads evaluate the same thunk. We do attempt to
// discover this situation in threadPaused(), but it's
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index ede47d3eb2..c592595737 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -399,7 +399,6 @@ void listAllBlocks (ListBlocksCb cb, void *user)
/* -----------------------------------------------------------------------------
Note [CAF management]
~~~~~~~~~~~~~~~~~~~~~
-
The entry code for every CAF does the following:
- calls newCAF, which builds a CAF_BLACKHOLE on the heap and atomically
@@ -434,7 +433,6 @@ void listAllBlocks (ListBlocksCb cb, void *user)
------------------
Note [atomic CAF entry]
~~~~~~~~~~~~~~~~~~~~~~~
-
With THREADED_RTS, newCAF() is required to be atomic (see
#5558). This is because if two threads happened to enter the same
CAF simultaneously, they would create two distinct CAF_BLACKHOLEs,
@@ -448,7 +446,6 @@ void listAllBlocks (ListBlocksCb cb, void *user)
------------------
Note [GHCi CAFs]
~~~~~~~~~~~~~~~~
-
For GHCi, we have additional requirements when dealing with CAFs:
- we must *retain* all dynamically-loaded CAFs ever entered,
@@ -470,7 +467,6 @@ void listAllBlocks (ListBlocksCb cb, void *user)
------------------
Note [Static objects under the nonmoving collector]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
Static object management is a bit tricky under the nonmoving collector as we
need to maintain a bit more state than in the moving collector. In
particular, the moving collector uses the low bits of the STATIC_LINK field
@@ -597,6 +593,7 @@ newCAF(StgRegTable *reg, StgIndStatic *caf)
if(keepCAFs && !(highMemDynamic && (void*) caf > (void*) 0x80000000))
{
// Note [dyn_caf_list]
+ // ~~~~~~~~~~~~~~~~~~~
// If we are in GHCi _and_ we are using dynamic libraries,
// then we can't redirect newCAF calls to newRetainedCAF (see below),
// so we make newCAF behave almost like newRetainedCAF.
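Concretely, "behave almost like newRetainedCAF" means chaining the CAF onto a retention list instead of letting it be collected. A simplified sketch (the real code also sets flag bits in the link word and takes a lock; both details are elided here):

    /* Simplified: retain the CAF for GHCi by linking it onto
     * dyn_caf_list through its static_link field. */
    caf->static_link = (StgClosure *) dyn_caf_list;
    dyn_caf_list = caf;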
@@ -990,7 +987,6 @@ accountAllocation(Capability *cap, W_ n)
/* Note [slop on the heap]
* ~~~~~~~~~~~~~~~~~~~~~~~
- *
* We use the term "slop" to refer to allocated memory on the heap which isn't
 * occupied by any closure. Usually closures are packed tightly into the heap
* blocks, storage for one immediately following another. However there are
@@ -1549,7 +1545,7 @@ dirty_MVAR(StgRegTable *reg, StgClosure *p, StgClosure *old_val)
/* -----------------------------------------------------------------------------
* Note [allocation accounting]
- *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - When cap->r.rCurrentNursery moves to a new block in the nursery,
* we add the size of the used portion of the previous block to
* cap->total_allocated. (see finishedNurseryBlock())
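That accounting step, sketched as a paraphrase of finishedNurseryBlock() rather than its exact source: a bdescr's start and free fields delimit the used portion of a block, so the credited amount is their difference.

    /* Paraphrase, not the exact RTS source: when rCurrentNursery
     * advances off a block, credit the words actually used in it. */
    static void
    finished_nursery_block_sketch (Capability *cap, bdescr *bd)
    {
        cap->total_allocated += bd->free - bd->start;
    }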
@@ -1825,7 +1821,6 @@ _bdescr (StgPtr p)
/*
Note [Sources of Block Level Fragmentation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
Block level fragmentation is unused space inside allocated megablocks.
The amount of fragmentation can be calculated as the difference between the
total size of allocated megablocks and the total size of allocated blocks.
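That calculation as a one-line helper; the counter names are illustrative, and MBLOCK_SIZE_W is assumed to be the megablock size in words from the RTS block allocator headers:

    /* fragmentation = space reserved by megablocks minus space handed
     * out as blocks; both counters here are illustrative names. */
    static W_
    block_level_fragmentation (W_ n_mblocks, W_ n_alloc_block_words)
    {
        return n_mblocks * MBLOCK_SIZE_W - n_alloc_block_words;
    }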
diff --git a/rts/sm/Storage.h b/rts/sm/Storage.h
index 48ddcf35f5..00f2943a51 100644
--- a/rts/sm/Storage.h
+++ b/rts/sm/Storage.h
@@ -82,7 +82,7 @@ bool doYouWantToGC(Capability *cap)
/* -----------------------------------------------------------------------------
Allocation accounting
- See [Note allocation accounting] in Storage.c
+ See Note [allocation accounting] in Storage.c
-------------------------------------------------------------------------- */
//
@@ -126,7 +126,7 @@ void move_STACK (StgStack *src, StgStack *dest);
/* -----------------------------------------------------------------------------
Note [STATIC_LINK fields]
-
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
The low 2 bits of the static link field have the following meaning:
00 we haven't seen this static object before
@@ -175,7 +175,7 @@ extern uint32_t prev_static_flag, static_flag;
/* -----------------------------------------------------------------------------
Note [CAF lists]
-
+ ~~~~~~~~~~~~~~~~
dyn_caf_list (CAFs chained through static_link)
This is a chain of all CAFs in the program, used for
dynamically-linked GHCi.
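Since the chain runs through static_link, walking it is an ordinary linked-list traversal. A sketch that counts the entries (the real links carry flag bits in their low bits, per Note [STATIC_LINK fields] above, which a faithful traversal would mask off; that detail is elided):

    static W_
    count_dyn_cafs (void)
    {
        W_ n = 0;
        for (StgIndStatic *p = dyn_caf_list;
             p != (StgIndStatic *) END_OF_CAF_LIST;
             p = (StgIndStatic *) p->static_link) {
            n++;
        }
        return n;
    }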