author     Ben Gamari <ben@smart-cactus.org>   2019-05-16 17:14:38 -0400
committer  Ben Gamari <ben@smart-cactus.org>   2019-07-10 01:41:01 -0400
commit     46c421eea1b3bd418b719c8a721a772e68e58b4a (patch)
tree       c75043be22abe9d147cfb3675ec22541db36fb3c
parent     7ca0d5316cca2912d07b95bd4f67557bd536856f (diff)
NonMoving: Prefetch segment header
-rw-r--r--  rts/sm/NonMoving.h      9
-rw-r--r--  rts/sm/NonMovingMark.c  1
2 files changed, 8 insertions, 2 deletions
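Non-moving segments are allocated at addresses aligned to the segment size, so the header of the segment owning a closure can be recovered from the closure's address with a single mask, without touching the block descriptor. The NonMoving.h change below exposes that arithmetic as nonmovingGetSegment_unchecked, and NonMovingMark.c then prefetches the segment's block_size alongside the closure's info table while popping the mark queue. A minimal standalone sketch of the masking trick follows; the 32 KiB segment size, the struct layout, and the helper names are illustrative stand-ins, not the RTS definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins; the real constants and layout live in rts/sm/NonMoving.h. */
#define SEGMENT_SIZE  (1UL << 15)           /* hypothetical 32 KiB segment          */
#define SEGMENT_MASK  (SEGMENT_SIZE - 1)    /* low bits addressing within a segment */

struct Segment {
    uint8_t block_size;    /* log2 of the block size, read during marking */
    /* ... bitmap and blocks follow in the real layout ... */
};

/* Mirror of the masking in nonmovingGetSegment_unchecked: clearing the low
 * bits of any pointer into a segment yields the segment header. */
static inline struct Segment *get_segment(void *p)
{
    return (struct Segment *) ((uintptr_t) p & ~(uintptr_t) SEGMENT_MASK);
}

int main(void)
{
    /* aligned_alloc stands in for the RTS segment allocator: the segment
     * starts on a SEGMENT_SIZE boundary, which is what the mask relies on. */
    struct Segment *seg = aligned_alloc(SEGMENT_SIZE, SEGMENT_SIZE);
    seg->block_size = 6;                     /* pretend: 64-byte blocks */

    void *interior = (char *) seg + 0x1234;  /* pointer somewhere inside a block */
    printf("recovered header: %p (expected %p)\n",
           (void *) get_segment(interior), (void *) seg);
    free(seg);
    return 0;
}

Because the computation is pure address arithmetic, it is cheap enough to run for every popped mark-queue entry purely to feed a prefetch.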
diff --git a/rts/sm/NonMoving.h b/rts/sm/NonMoving.h
index d1624a494d..165c7b1dfd 100644
--- a/rts/sm/NonMoving.h
+++ b/rts/sm/NonMoving.h
@@ -202,13 +202,18 @@ INLINE_HEADER void *nonmovingSegmentGetBlock(struct NonmovingSegment *seg, nonmo
 // Get the segment which a closure resides in. Assumes that pointer points into
 // non-moving heap.
-INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment(StgPtr p)
+INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment_unchecked(StgPtr p)
 {
-    ASSERT(HEAP_ALLOCED_GC(p) && (Bdescr(p)->flags & BF_NONMOVING));
     const uintptr_t mask = ~NONMOVING_SEGMENT_MASK;
     return (struct NonmovingSegment *) (((uintptr_t) p) & mask);
 }
 
+INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment(StgPtr p)
+{
+    ASSERT(HEAP_ALLOCED_GC(p) && (Bdescr(p)->flags & BF_NONMOVING));
+    return nonmovingGetSegment_unchecked(p);
+}
+
 INLINE_HEADER nonmoving_block_idx nonmovingGetBlockIdx(StgPtr p)
 {
     ASSERT(HEAP_ALLOCED_GC(p) && (Bdescr(p)->flags & BF_NONMOVING));
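The block index used elsewhere in this header (nonmovingGetBlockIdx, nonmovingSegmentGetBlock in the hunk context above) falls out of the same alignment arithmetic: take the pointer's offset within its segment, skip the header area, and shift down by the segment's log2 block size. Below is a self-contained analogue under the same illustrative layout as the earlier sketch; the 256-byte header area, field names, and helper names are assumptions rather than the RTS layout.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SEGMENT_SIZE  (1UL << 15)       /* hypothetical, as in the earlier sketch */
#define SEGMENT_MASK  (SEGMENT_SIZE - 1)
#define BLOCKS_OFFSET 256               /* assumed start of the block area        */

struct Segment { uint8_t block_size; }; /* log2 of the block size (illustrative)  */

static inline struct Segment *get_segment(void *p)
{
    return (struct Segment *) ((uintptr_t) p & ~(uintptr_t) SEGMENT_MASK);
}

/* Analogue of nonmovingGetBlockIdx under the assumed layout: the pointer's
 * offset into the block area, shifted down by the log2 block size. */
static inline unsigned get_block_idx(void *p)
{
    struct Segment *seg = get_segment(p);
    uintptr_t off = ((uintptr_t) p & SEGMENT_MASK) - BLOCKS_OFFSET;
    return (unsigned) (off >> seg->block_size);
}

int main(void)
{
    struct Segment *seg = aligned_alloc(SEGMENT_SIZE, SEGMENT_SIZE);
    seg->block_size = 6;                                   /* 64-byte blocks   */
    void *p = (char *) seg + BLOCKS_OFFSET + 3 * 64 + 10;  /* inside block 3   */
    printf("block index = %u\n", get_block_idx(p));        /* prints 3         */
    free(seg);
    return 0;
}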
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index 201c8fc958..5e2a90209b 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -770,6 +770,7 @@ static MarkQueueEnt markQueuePop (MarkQueue *q)
         // MarkQueueEnt encoding always places the pointer to the object to be
         // marked first.
         prefetchForRead(&new.mark_closure.p->header.info);
+        prefetchForRead(&nonmovingGetSegment_unchecked((StgPtr) new.mark_closure.p)->block_size);
         q->prefetch_queue[i] = new;
         i = (i + 1) % MARK_PREFETCH_QUEUE_DEPTH;
     }
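The prefetches in markQueuePop are issued when an entry enters the small prefetch ring, several pops before that entry is actually marked, so the loads of the closure's info table and, with this patch, its segment header can overlap with the marking of earlier entries. The sketch below shows that ring pattern in isolation; the item type, ring depth, and helper names are hypothetical, and __builtin_prefetch is the GCC/Clang builtin that a helper like prefetchForRead typically wraps.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical work item standing in for a mark-queue entry: processing it
 * needs to read *obj, which may well be cold in cache. */
typedef struct { long *obj; } Item;

#define DEPTH 5                  /* illustrative, like MARK_PREFETCH_QUEUE_DEPTH */

static Item ring[DEPTH];         /* entries popped from the input but not yet processed */
static unsigned head = 0;

/* Pull the next item from the input, issue a prefetch for the data it will
 * need, and return the item that entered the ring DEPTH pops ago, whose data
 * has had time to arrive in cache. This mirrors the shape of markQueuePop's
 * prefetch ring, not its actual code. */
static Item pop(Item **input)
{
    Item incoming = **input;
    (*input)++;
    if (incoming.obj != NULL)
        __builtin_prefetch(incoming.obj, 0);   /* 0 = prefetch for read */
    Item ready = ring[head];
    ring[head] = incoming;
    head = (head + 1) % DEPTH;
    return ready;
}

int main(void)
{
    long data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    Item work[16];
    for (int j = 0; j < 8; j++)  work[j] = (Item) { &data[j] };
    for (int j = 8; j < 16; j++) work[j] = (Item) { NULL };   /* padding to drain the ring */

    Item *cursor = work;
    long sum = 0;
    for (int j = 0; j < 16; j++) {
        Item it = pop(&cursor);
        if (it.obj != NULL)
            sum += *it.obj;        /* by now the prefetch has likely completed */
    }
    printf("sum = %ld\n", sum);    /* 36 */
    return 0;
}

The extra prefetch added by this patch is cheap for the same reason as the existing one: nonmovingGetSegment_unchecked needs only the closure address, so the segment header's cache line can be requested long before block_size is actually read.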