author     Ben Gamari <ben@smart-cactus.org>            2020-12-10 09:31:00 -0500
committer  Marge Bot <ben+marge-bot@smart-cactus.org>   2021-01-07 00:10:15 -0500
commit     30f7137d1acc48499840d9a106ecce6109fd8e7a (patch)
tree       a7a792bf6f014b574fde8032a6f0488567dd01dc
parent     fb81f2edccaa8a02b0b3f68e82f0c795346e3b54 (diff)
download   haskell-30f7137d1acc48499840d9a106ecce6109fd8e7a.tar.gz
rts: Zero shrunk array slop in vanilla RTS
But only when profiling or DEBUG is enabled. Fixes #17572.
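For context, "shrunk array slop" is what is left behind when a mutable array is shrunk in place: the array's recorded size is lowered, but the words between the new and old ends stay on the heap. A minimal C sketch of what zeroing that slop amounts to; the closure layout and names here are illustrative, not the real RTS definitions.

#include <stdint.h>
#include <string.h>

typedef uintptr_t StgWord; /* a heap word, as in the RTS */

/* Illustrative array closure: a header word, the recorded payload size,
 * then the payload itself. Not the real StgMutArrPtrs layout. */
typedef struct {
    StgWord header;
    StgWord size_w;     /* current payload size in words */
    StgWord payload[];
} Array;

/* Shrink the array to new_size_w words and zero the freed tail, so a
 * later heap census never mistakes the stale words for live data. */
static void shrink_and_zero(Array *arr, StgWord new_size_w)
{
    StgWord old_size_w = arr->size_w;
    arr->size_w = new_size_w;
    memset(&arr->payload[new_size_w], 0,
           (old_size_w - new_size_w) * sizeof(StgWord));
}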
-rw-r--r--  includes/Cmm.h                  6
-rw-r--r--  rts/sm/Storage.c               13
-rw-r--r--  utils/deriveConstants/Main.hs   2
3 files changed, 16 insertions, 5 deletions
diff --git a/includes/Cmm.h b/includes/Cmm.h
index 574c60a1b3..fdb4badf9a 100644
--- a/includes/Cmm.h
+++ b/includes/Cmm.h
@@ -630,7 +630,11 @@
#else
#define OVERWRITING_CLOSURE_SIZE(c, size) /* nothing */
#define OVERWRITING_CLOSURE(c) /* nothing */
-#define OVERWRITING_CLOSURE_MUTABLE(c, off) /* nothing */
+/* This is used to zero slop after shrunk arrays. It is important that we do
+ * this whenever profiling is enabled as described in Note [slop on the heap]
+ * in Storage.c. */
+#define OVERWRITING_CLOSURE_MUTABLE(c, off) \
+ if (TO_W_(RtsFlags_ProfFlags_doHeapProfile(RtsFlags)) != 0) { foreign "C" overwritingMutableClosureOfs(c "ptr", off); }
#endif

// Memory barriers.
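The new macro only pays for the foreign call when a heap profile was requested. Read as plain C, the guard behaves roughly like the sketch below; doHeapProfile and overwritingMutableClosureOfs are the names from the patch, while the surrounding declarations are assumed scaffolding.

#include <stdint.h>

/* Assumed mirror of the flag the Cmm macro reads; the real field lives
 * inside the RTS_FLAGS structure. */
struct ProfFlags { uint32_t doHeapProfile; };
extern struct ProfFlags profFlags;

/* Stand-in declaration for the RTS helper invoked via `foreign "C"`;
 * the real definition is in rts/sm/Storage.c. */
void overwritingMutableClosureOfs(void *closure, uint32_t offset);

/* What OVERWRITING_CLOSURE_MUTABLE(c, off) expands to, morally: */
static inline void overwriting_closure_mutable(void *c, uint32_t off)
{
    if (profFlags.doHeapProfile != 0) {
        overwritingMutableClosureOfs(c, off);
    }
}

The profiling and DEBUG builds take the other branch of the surrounding #if, where slop is zeroed unconditionally, so the flag check is only needed here in the vanilla case.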
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index a43719fc9b..41abbfc81c 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -953,14 +953,19 @@ accountAllocation(Capability *cap, W_ n)
* profiler, see Note [skipping slop in the heap profiler].
*
* In general we zero:
+ *
* - Pinned object alignment slop, see MEMSET_SLOP_W in allocatePinned.
* - Large object alignment slop, see MEMSET_SLOP_W in allocatePinned.
- * This is necessary even in the vanilla RTS since the user may trigger a heap
- * census via +RTS -hT even when not linking against the profiled RTS.
- *
- * Only when profiling we zero:
* - Shrunk array slop, see OVERWRITING_CLOSURE_MUTABLE.
*
+ * Note that this is necessary even in the vanilla (i.e. non-profiling) RTS
+ * since the user may trigger a heap census via +RTS -hT, which is available
+ * even when not linking against the profiled RTS. Failing to zero slop
+ * left by shrunk arrays has resulted in a few nasty bugs (#17572, #9666).
+ * However, since shrinking an array may leave a large amount of slop
+ * (unlike alignment slop), we take care to zero it only when heap profiling
+ * or DEBUG is enabled.
+ *
* When performing LDV profiling or using a (single threaded) debug RTS we zero
* slop even when overwriting immutable closures, see Note [zeroing slop when
* overwriting closures].
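The policy this note describes can be read as a single predicate: zero shrunk-array slop exactly when someone could observe it. A hedged C sketch, assuming the doHeapProfile flag from this patch and the RTS's DEBUG build macro:

#include <stdbool.h>
#include <stdint.h>

struct ProfFlags { uint32_t doHeapProfile; };

/* Zero shrunk-array slop only when someone can observe it: a DEBUG RTS
 * sanity-checks the heap anyway, and any RTS may run a census when the
 * program is started with e.g. +RTS -hT (which sets doHeapProfile). */
static bool zero_shrunk_array_slop(const struct ProfFlags *pf)
{
#if defined(DEBUG)
    (void)pf;
    return true;
#else
    return pf->doHeapProfile != 0;
#endif
}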
diff --git a/utils/deriveConstants/Main.hs b/utils/deriveConstants/Main.hs
index 1c648d2e6f..c384a7058f 100644
--- a/utils/deriveConstants/Main.hs
+++ b/utils/deriveConstants/Main.hs
@@ -561,6 +561,8 @@ wanteds os = concat
,structField C "StgCompactNFDataBlock" "owner"
,structField C "StgCompactNFDataBlock" "next"
+ ,structField_ C "RtsFlags_ProfFlags_doHeapProfile"
+ "RTS_FLAGS" "ProfFlags.doHeapProfile"
,structField_ C "RtsFlags_ProfFlags_showCCSOnException"
"RTS_FLAGS" "ProfFlags.showCCSOnException"
,structField_ C "RtsFlags_DebugFlags_apply"
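For background, deriveConstants computes struct-field offsets at build time so that Cmm code can read C fields without including C headers. The generated accessor used in Cmm.h above behaves roughly like the following C macro; the offset value below is made up for illustration.

#include <stdint.h>

/* Hypothetical offset; the real value is computed by deriveConstants
 * from the actual RTS_FLAGS struct layout at build time. */
#define OFFSET_RtsFlags_ProfFlags_doHeapProfile 152

/* The Cmm expression RtsFlags_ProfFlags_doHeapProfile(RtsFlags) is
 * morally a field load at that offset: */
#define RtsFlags_ProfFlags_doHeapProfile(base) \
    (*(uint32_t *)((char *)(base) + OFFSET_RtsFlags_ProfFlags_doHeapProfile))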