author     Ben Gamari <ben@smart-cactus.org>   2019-05-11 11:55:42 -0400
committer  Ben Gamari <ben@smart-cactus.org>   2019-10-22 12:18:39 -0400
commit     e5eda61e768a74723f6e7955cc9247c14ecabc6d (patch)
tree       c575abc4ed5054b3d9e060825162ee0e4d504df6 /rts
parent     26d2d3316dcbf9a49fc7d3252d323d78c0f66e6a (diff)
download   haskell-e5eda61e768a74723f6e7955cc9247c14ecabc6d.tar.gz
NonMoving: Optimize bitmap search during allocation
Use memchr instead of an open-coded loop. This is nearly twice as fast in a synthetic benchmark.
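For illustration, a minimal sketch of the kind of synthetic benchmark behind that figure; the file name, bitmap size, iteration count, and timing harness are assumptions for this sketch, not taken from the GHC tree. It times an open-coded zero-byte scan against memchr over a mostly-full bitmap:

// bench_bitmap.c -- hypothetical micro-benchmark; sizes and names are
// illustrative, not from the GHC test suite.
// build: cc -O2 bench_bitmap.c -o bench_bitmap
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define BITMAP_LEN (64 * 1024)
#define ITERS      20000

// Open-coded scan: index of first zero byte at or after `start`, or len.
static size_t scan_loop(const uint8_t *bm, size_t start, size_t len)
{
    for (size_t i = start; i < len; i++) {
        if (!bm[i])
            return i;
    }
    return len;
}

// memchr-based scan with the same contract.
static size_t scan_memchr(const uint8_t *bm, size_t start, size_t len)
{
    const uint8_t *c = memchr(bm + start, 0, len - start);
    return c ? (size_t)(c - bm) : len;
}

static double bench(size_t (*scan)(const uint8_t *, size_t, size_t),
                    const uint8_t *bm)
{
    struct timespec t0, t1;
    volatile size_t sink = 0;   // keep the calls from being optimized away
    clock_gettime(CLOCK_MONOTONIC, &t0);
    for (int i = 0; i < ITERS; i++)
        sink += scan(bm, 0, BITMAP_LEN);
    clock_gettime(CLOCK_MONOTONIC, &t1);
    (void)sink;
    return (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
}

int main(void)
{
    // All blocks marked live except the last: the worst case, where the
    // scan must walk (almost) the whole bitmap.
    uint8_t *bm = malloc(BITMAP_LEN);
    memset(bm, 1, BITMAP_LEN);
    bm[BITMAP_LEN - 1] = 0;

    printf("loop:   %.3fs\n", bench(scan_loop, bm));
    printf("memchr: %.3fs\n", bench(scan_memchr, bm));
    free(bm);
    return 0;
}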
Diffstat (limited to 'rts')
-rw-r--r--  rts/sm/NonMoving.c | 16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index f9fb329154..8b27581386 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -303,8 +303,10 @@ static inline unsigned long log2_ceil(unsigned long x)
 // Advance a segment's next_free pointer. Returns true if the segment is full.
 static bool advance_next_free(struct NonmovingSegment *seg)
 {
-    uint8_t *bitmap = seg->bitmap;
-    unsigned int blk_count = nonmovingSegmentBlockCount(seg);
+    const uint8_t *bitmap = seg->bitmap;
+    const unsigned int blk_count = nonmovingSegmentBlockCount(seg);
+#if defined(NAIVE_ADVANCE_FREE)
+    // reference implementation
     for (unsigned int i = seg->next_free+1; i < blk_count; i++) {
         if (!bitmap[i]) {
             seg->next_free = i;
@@ -313,6 +315,16 @@ static bool advance_next_free(struct NonmovingSegment *seg)
     }
     seg->next_free = blk_count;
     return true;
+#else
+    const uint8_t *c = memchr(&bitmap[seg->next_free+1], 0, blk_count - seg->next_free - 1);
+    if (c == NULL) {
+        seg->next_free = blk_count;
+        return true;
+    } else {
+        seg->next_free = c - bitmap;
+        return false;
+    }
+#endif
 }

 static struct NonmovingSegment *pop_active_segment(struct NonmovingAllocator *alloca)
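As a usage note, the index arithmetic in the new path (the search starts at next_free+1, so the byte count is blk_count - next_free - 1, and c - bitmap recovers the block index) can be checked with a standalone sketch. struct Seg below is a simplified stand-in for GHC's struct NonmovingSegment, with a hypothetical fixed BLK_COUNT in place of nonmovingSegmentBlockCount():

// Standalone illustration only; not the real segment layout.
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define BLK_COUNT 8

struct Seg {
    unsigned int next_free;
    uint8_t bitmap[BLK_COUNT];   // nonzero byte = block in use
};

static bool advance_next_free(struct Seg *seg)
{
    const uint8_t *bitmap = seg->bitmap;
    // Search starts one past next_free, covering the remaining
    // BLK_COUNT - next_free - 1 bytes of the bitmap.
    const uint8_t *c = memchr(&bitmap[seg->next_free + 1], 0,
                              BLK_COUNT - seg->next_free - 1);
    if (c == NULL) {
        seg->next_free = BLK_COUNT;  // no free block: segment is full
        return true;
    }
    seg->next_free = c - bitmap;     // index of the first free block
    return false;
}

int main(void)
{
    struct Seg s = { .next_free = 0,
                     .bitmap = {1, 1, 1, 0, 1, 0, 1, 1} };
    assert(!advance_next_free(&s) && s.next_free == 3);  // skips used blocks
    assert(!advance_next_free(&s) && s.next_free == 5);
    s.bitmap[5] = 1;                                     // fill the last gap
    assert(advance_next_free(&s) && s.next_free == BLK_COUNT);  // now full
    return 0;
}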