author    Daniel Gröber <dxld@darkboxed.org>           2020-04-10 02:36:24 +0200
committer Marge Bot <ben+marge-bot@smart-cactus.org>   2020-04-14 23:31:01 -0400
commit    15fa9bd6dd2d0b8d1fcd7135c85ea0d60853340d (patch)
tree      a171110ed89c9df0cb0f6bc5884b9e9353c26d7c
parent    41230e2601703df0233860be3f7d53f3a01bdbe5 (diff)
rts: Expand and add more notes regarding slop
Diffstat (limited to 'rts/sm')
-rw-r--r--  rts/sm/Sanity.c    14
-rw-r--r--  rts/sm/Storage.c   49
2 files changed, 58 insertions(+), 5 deletions(-)
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 1c4c75514d..3ac926715a 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -475,7 +475,7 @@ void checkHeapChain (bdescr *bd)
ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
p += size;
- /* skip over slop */
+ /* skip over slop, see Note [slop on the heap] */
while (p < bd->free &&
(*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
}
@@ -796,12 +796,16 @@ static void checkGeneration (generation *gen,
ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
#if defined(THREADED_RTS)
+ // Note [heap sanity checking with SMP]
+ //
// heap sanity checking doesn't work with SMP for two reasons:
- // * we can't zero the slop (see Updates.h). However, we can sanity-check
- // the heap after a major gc, because there is no slop.
//
- // * the nonmoving collector may be mutating its large object lists, unless we
- // were in fact called by the nonmoving collector.
+ // * We can't zero the slop. However, we can sanity-check the heap after a
+ // major gc, because there is no slop. See also Updates.h and Note
+ // [zeroing slop when overwriting closures].
+ //
+ // * The nonmoving collector may be mutating its large object lists,
+ // unless we were in fact called by the nonmoving collector.
if (!after_major_gc) return;
#endif
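
The first hunk above scans a block word by word, relying on the fact that
slop is zeroed (and that zeroed or small words can never be mistaken for
info pointers). A rough, self-contained sketch of that scanning idea, with
hypothetical stand-ins for LOOKS_LIKE_INFO_PTR and the info-table-driven
size computation, might look like this:

#include <stdint.h>
#include <stddef.h>

typedef uintptr_t W_;

/* Hypothetical stand-in for LOOKS_LIKE_INFO_PTR: zeroed slop words are 0,
 * and small values can't be valid info pointers either, so anything below
 * the threshold is treated as slop. */
static int looks_like_info_ptr(W_ w) {
    return w >= 0x1000;
}

/* Hypothetical size function: the real RTS derives a closure's size from
 * its info table; here we pretend the word after the header stores the
 * payload size in words. */
static size_t closure_size_in_words(const W_ *p) {
    return 2 + (size_t)p[1];  /* header word + size word + payload */
}

/* Walk the words in [p, free_ptr): step over whole closures where we see
 * a plausible info pointer, and skip lone words of slop everywhere else. */
static void scan_block(const W_ *p, const W_ *free_ptr)
{
    while (p < free_ptr) {
        if (looks_like_info_ptr(*p)) {
            /* ... sanity-check the closure at p here ... */
            p += closure_size_in_words(p);
        } else {
            p++;  /* skip over slop */
        }
    }
}
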
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 2a86c19fdb..d9f5fb77ff 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -907,6 +907,53 @@ accountAllocation(Capability *cap, W_ n)
}
+/* Note [slop on the heap]
+ *
+ * We use the term "slop" to refer to allocated memory on the heap which isn't
+ * occupied by any closure. Usually closures are packed tightly into the heap
+ * blocks, the storage for one immediately following another. However, there are
+ * situations where slop is left behind:
+ *
+ * - Allocating large objects (BF_LARGE)
+ *
+ * These are given an entire block, but if they don't fill the whole block
+ * the rest is slop. See allocateMightFail in Storage.c.
+ *
+ * - Allocating pinned objects with alignment (BF_PINNED)
+ *
+ * These are packed into blocks like normal closures; however, they can
+ * have alignment constraints, and any memory that had to be skipped for
+ * alignment becomes slop. See allocatePinned in Storage.c.
+ *
+ * - Shrinking (Small)Mutable(Byte)Array#
+ *
+ * The size of these closures can be decreased after allocation, leaving the
+ * now-unused memory behind as slop. See stg_resizzeMutableByteArrayzh,
+ * stg_shrinkSmallMutableArrayzh, and stg_shrinkMutableByteArrayzh in
+ * PrimOps.cmm.
+ *
+ * This type of slop is extra tricky because it can also be pinned and
+ * large.
+ *
+ * - Overwriting closures
+ *
+ * During GC the RTS overwrites closures with forwarding pointers; this can
+ * leave slop behind depending on the size of the closure being
+ * overwritten. See Note [zeroing slop when overwriting closures].
+ *
+ * In several of these situations we zero the slop, which lets us linearly
+ * scan over blocks of closures. This trick is used by the sanity checking
+ * code and the heap profiler; see Note [skipping slop in the heap profiler].
+ *
+ * When profiling we zero:
+ * - Pinned object alignment slop, see MEMSET_IF_PROFILING_W in allocatePinned.
+ * - Shrunk array slop, see OVERWRITING_MUTABLE_CLOSURE.
+ *
+ * When performing LDV profiling or using a (single threaded) debug RTS we zero
+ * slop even when overwriting immutable closures, see Note [zeroing slop when
+ * overwriting closures].
+ */
+
/* -----------------------------------------------------------------------------
StgPtr allocate (Capability *cap, W_ n)
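
As a rough illustration of the shrinking case described in the note above,
using a toy layout rather than the real StgArrBytes/StgMutArrPtrs
structures: shrinking amounts to updating the stored size, and the tail
words that fall out of the closure become slop, which is zeroed (in the
spirit of OVERWRITING_MUTABLE_CLOSURE) so blocks stay linearly scannable:

#include <stdint.h>
#include <string.h>

typedef uintptr_t W_;

/* Toy closure layout: an info-pointer word, a size word, then the payload.
 * The real StgArrBytes/StgMutArrPtrs layouts differ. */
typedef struct {
    W_ info;
    W_ size_in_words;
    W_ payload[];
} ToyArray;

/* Shrink the array to new_size payload words. The tail that is no longer
 * part of the closure becomes slop; zeroing it keeps the block linearly
 * scannable. */
static void shrink_toy_array(ToyArray *arr, W_ new_size)
{
    if (new_size < arr->size_in_words) {
        memset(arr->payload + new_size, 0,
               (arr->size_in_words - new_size) * sizeof(W_));
        arr->size_in_words = new_size;
    }
}
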
@@ -1069,6 +1116,8 @@ allocateMightFail (Capability *cap, W_ n)
/**
* When profiling we zero the space used for alignment. This allows us to
* traverse pinned blocks in the heap profiler.
+ *
+ * See Note [skipping slop in the heap profiler]
*/
#if defined(PROFILING)
#define MEMSET_IF_PROFILING_W(p, val, len) memset(p, val, (len) * sizeof(W_))
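
For context, here is a schematic (not the actual allocatePinned code;
alloc_pinned_aligned and its bump-pointer argument are made up for
illustration) of how a macro like this gets used: the words skipped to
satisfy an alignment request are exactly the slop that must be zeroed
under profiling:

#include <stdint.h>
#include <string.h>

typedef uintptr_t W_;

#if defined(PROFILING)
#define MEMSET_IF_PROFILING_W(p, val, len) memset(p, val, (len) * sizeof(W_))
#else
#define MEMSET_IF_PROFILING_W(p, val, len) /* nothing */
#endif

/* Hypothetical bump allocator: hand out n words from *hp at the requested
 * byte alignment (a power of two). The words skipped to reach the aligned
 * address become slop, which we zero under profiling so a heap scan won't
 * mistake stale data there for an info pointer. */
static W_ *alloc_pinned_aligned(W_ **hp, size_t n, uintptr_t align)
{
    W_ *p = *hp;
    W_ *aligned = (W_ *)(((uintptr_t)p + align - 1) & ~(align - 1));
    MEMSET_IF_PROFILING_W(p, 0, (size_t)(aligned - p));  /* zero the slop */
    *hp = aligned + n;
    return aligned;
}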