author     Peter Wang <novalazy@gmail.com>    2021-06-09 23:17:51 +0300
committer  Ivan Maidanski <ivmai@mail.ru>     2021-06-09 23:22:05 +0300
commit     9245e1154d271b9fd69c5c6de75c6ac37f3badc4 (patch)
tree       ca0f8d71f6eaeeee0be73e82ee68485441c51908 /allchblk.c
parent     57b97be07c514fcc4b608b13768fd2bf637a5899 (diff)
Limit number of unmapped regions (Linux)
Issue #324 (bdwgc). When the number of memory map areas exceeds vm.max_map_count on Linux (~65530 by default), the mmap call in GC_unmap fails with ENOMEM. This commit fixes the issue by establishing a soft limit on the number of unmapped regions.

* allchblk.c (get_block_ending_at): New static function.
* allchblk.c [USE_MUNMAP && COUNT_UNMAPPED_REGIONS] (calc_num_unmapped_regions_delta): Likewise.
* allchblk.c (GC_free_block_ending_at): Use get_block_ending_at().
* allchblk.c [GC_adjust_num_unmapped] (GC_adjust_num_unmapped): New inline function.
* allchblk.c [USE_MUNMAP && COUNT_UNMAPPED_REGIONS] (GC_unmap_old): Skip unmapping if GC_num_unmapped_regions exceeds GC_UNMAPPED_REGIONS_SOFT_LIMIT; otherwise update GC_num_unmapped_regions (before the GC_unmap call).
* allchblk.c [USE_MUNMAP] (GC_merge_unmapped, GC_allochblk_nth): Call GC_adjust_num_unmapped() before GC_remap() and GC_unmap().
* headers.c (GC_next_used_block, GC_prev_block): Move/refine comment to gc_priv.h.
* headers.c (GC_next_used_block): Rename to GC_next_block and add allow_free parameter.
* include/private/gc_priv.h (GC_next_used_block): Likewise.
* include/private/gc_priv.h [USE_MUNMAP && COUNT_UNMAPPED_REGIONS] (_GC_arrays): Add _num_unmapped_regions field.
* include/private/gcconfig.h [(M68K || POWERPC || SPARC || I386 || MIPS || OR1K || ALPHA || IA64 || S390 || AARCH64 || ARM32 || CRIS || SH && !SH4 || AVR32 || M32R || X86_64 || HEXAGON || TILEPRO || TILEGX || RISCV) && LINUX] (COUNT_UNMAPPED_REGIONS): Define macro.
* include/private/gcconfig.h [USE_MUNMAP && COUNT_UNMAPPED_REGIONS && !GC_UNMAPPED_REGIONS_SOFT_LIMIT] (GC_UNMAPPED_REGIONS_SOFT_LIMIT): Likewise.
* os_dep.c [USE_MUNMAP] (GC_unmap_end): Move to gc_priv.h and make it inline.
* mark.c (GC_push_next_marked, GC_push_next_marked_dirty, GC_push_next_marked_uncollectable): Use GC_next_block(h, FALSE) instead of GC_next_used_block(h).
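
The counting rule behind the soft limit can be pictured in isolation. The sketch below is illustrative only (region_delta and main are not part of bdwgc); it restates the neighbor-based delta that calc_num_unmapped_regions_delta computes in the diff that follows. On Linux, the per-process ceiling on map areas can be read from /proc/sys/vm/max_map_count.

/* Standalone sketch of the delta rule: how the count of unmapped      */
/* regions changes when one block flips between mapped and unmapped,   */
/* given the state of its immediate neighbors.                         */
#include <stdio.h>

static int region_delta(int block_is_mapped,
                        int prev_unmapped, int next_unmapped)
{
  if (prev_unmapped && next_unmapped) {
    /* Unmapping the block merges two regions into one (-1);           */
    /* remapping it splits one region into two (+1).                   */
    return block_is_mapped ? -1 : 1;
  }
  if (!prev_unmapped && !next_unmapped) {
    /* Unmapping creates an isolated region (+1); remapping it         */
    /* removes that region (-1).                                       */
    return block_is_mapped ? 1 : -1;
  }
  /* The block joins, or is carved off, an adjacent region: no change. */
  return 0;
}

int main(void)
{
  printf("%d\n", region_delta(1, 1, 1)); /* unmap between two unmapped: -1 */
  printf("%d\n", region_delta(1, 0, 0)); /* unmap between two mapped:   +1 */
  printf("%d\n", region_delta(0, 1, 0)); /* remap adjoining one region:  0 */
  return 0;
}
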
Diffstat (limited to 'allchblk.c')
-rw-r--r--  allchblk.c  104
1 file changed, 97 insertions, 7 deletions
diff --git a/allchblk.c b/allchblk.c
index a70ee73c..d96d3a8a 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -326,8 +326,8 @@ GC_INLINE void GC_remove_from_fl(hdr *hhdr)
GC_remove_from_fl_at(hhdr, GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz)));
}
-/* Return a pointer to the free block ending just before h, if any. */
-STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
+/* Return a pointer to the block ending just before h, if any. */
+static struct hblk * get_block_ending_at(struct hblk *h)
{
struct hblk * p = h - 1;
hdr * phdr;
@@ -338,16 +338,27 @@ STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
phdr = HDR(p);
}
if (0 != phdr) {
- if(HBLK_IS_FREE(phdr)) {
+ return p;
+ }
+ p = GC_prev_block(h - 1);
+ if (p) {
+ phdr = HDR(p);
+ if ((ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
return p;
- } else {
- return 0;
}
}
- p = GC_prev_block(h - 1);
+ return NULL;
+}
+
+/* Return a pointer to the free block ending just before h, if any. */
+STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
+{
+ struct hblk * p = get_block_ending_at(h);
+ hdr * phdr;
+
if (p /* != NULL */) { /* CPPCHECK */
phdr = HDR(p);
- if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
+ if (HBLK_IS_FREE(phdr)) {
return p;
}
}
@@ -395,6 +406,60 @@ STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
GC_INNER int GC_unmap_threshold = MUNMAP_THRESHOLD;
+#ifdef COUNT_UNMAPPED_REGIONS
+  /* GC_unmap_old will avoid creating more than this many unmapped regions, */
+  /* but an unmapped region may be split again, so the limit may be exceeded. */
+
+ /* Return the change in number of unmapped regions if the block h swaps */
+ /* from its current state of mapped/unmapped to the opposite state. */
+ static int calc_num_unmapped_regions_delta(struct hblk *h, hdr *hhdr)
+ {
+ struct hblk * prev = get_block_ending_at(h);
+ struct hblk * next;
+ GC_bool prev_unmapped = FALSE;
+ GC_bool next_unmapped = FALSE;
+
+ next = GC_next_block((struct hblk *)((ptr_t)h + hhdr->hb_sz), TRUE);
+ /* Ensure next is contiguous with h. */
+ if ((ptr_t)next != GC_unmap_end((ptr_t)h, (size_t)hhdr->hb_sz)) {
+ next = NULL;
+ }
+ if (prev != NULL) {
+ hdr * prevhdr = HDR(prev);
+ prev_unmapped = !IS_MAPPED(prevhdr);
+ }
+ if (next != NULL) {
+ hdr * nexthdr = HDR(next);
+ next_unmapped = !IS_MAPPED(nexthdr);
+ }
+
+ if (prev_unmapped && next_unmapped) {
+ /* If h unmapped, merge two unmapped regions into one. */
+ /* If h remapped, split one unmapped region into two. */
+ return IS_MAPPED(hhdr) ? -1 : 1;
+ }
+ if (!prev_unmapped && !next_unmapped) {
+ /* If h unmapped, create an isolated unmapped region. */
+ /* If h remapped, remove it. */
+ return IS_MAPPED(hhdr) ? 1 : -1;
+ }
+ /* If h unmapped, merge it with previous or next unmapped region. */
+ /* If h remapped, reduce either previous or next unmapped region. */
+    /* Either way, the number of unmapped regions does not change. */
+ return 0;
+ }
+#endif /* COUNT_UNMAPPED_REGIONS */
+
+/* Update GC_num_unmapped_regions assuming the block h changes */
+/* from its current state of mapped/unmapped to the opposite state. */
+GC_INLINE void GC_adjust_num_unmapped(struct hblk *h GC_ATTR_UNUSED,
+ hdr *hhdr GC_ATTR_UNUSED)
+{
+# ifdef COUNT_UNMAPPED_REGIONS
+ GC_num_unmapped_regions += calc_num_unmapped_regions_delta(h, hhdr);
+# endif
+}
+
/* Unmap blocks that haven't been recently touched. This is the only */
/* way blocks are ever unmapped. */
GC_INNER void GC_unmap_old(void)
@@ -403,6 +468,12 @@ GC_INNER void GC_unmap_old(void)
if (GC_unmap_threshold == 0)
return; /* unmapping disabled */
+# ifdef COUNT_UNMAPPED_REGIONS
+ /* Skip unmapping if we have already exceeded the soft limit. */
+ /* This forgoes any opportunities to merge unmapped regions though. */
+ if (GC_num_unmapped_regions >= GC_UNMAPPED_REGIONS_SOFT_LIMIT)
+ return;
+# endif
for (i = 0; i <= N_HBLK_FLS; ++i) {
struct hblk * h;
@@ -416,6 +487,19 @@ GC_INNER void GC_unmap_old(void)
/* truncated counter value wrapping is handled correctly). */
if ((unsigned short)(GC_gc_no - hhdr->hb_last_reclaimed) >
(unsigned short)GC_unmap_threshold) {
+# ifdef COUNT_UNMAPPED_REGIONS
+ /* Continue with unmapping the block only if it will not */
+ /* create too many unmapped regions, or if unmapping */
+ /* reduces the number of regions. */
+ int delta = calc_num_unmapped_regions_delta(h, hhdr);
+ signed_word regions = GC_num_unmapped_regions + delta;
+
+ if (delta >= 0 && regions >= GC_UNMAPPED_REGIONS_SOFT_LIMIT) {
+ GC_COND_LOG_PRINTF("Unmapped regions limit reached!\n");
+ return;
+ }
+ GC_num_unmapped_regions = regions;
+# endif
GC_unmap((ptr_t)h, (size_t)hhdr->hb_sz);
hhdr -> hb_flags |= WAS_UNMAPPED;
}
@@ -454,17 +538,21 @@ GC_INNER void GC_merge_unmapped(void)
if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
/* make both consistent, so that we can merge */
if (size > nextsize) {
+ GC_adjust_num_unmapped(next, nexthdr);
GC_remap((ptr_t)next, nextsize);
} else {
+ GC_adjust_num_unmapped(h, hhdr);
GC_unmap((ptr_t)h, size);
GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
hhdr -> hb_flags |= WAS_UNMAPPED;
}
} else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
if (size > nextsize) {
+ GC_adjust_num_unmapped(next, nexthdr);
GC_unmap((ptr_t)next, nextsize);
GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
} else {
+ GC_adjust_num_unmapped(h, hhdr);
GC_remap((ptr_t)h, size);
hhdr -> hb_flags &= ~WAS_UNMAPPED;
hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
@@ -717,6 +805,7 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, int may_split)
/* Make sure it's mapped before we mangle it. */
# ifdef USE_MUNMAP
if (!IS_MAPPED(hhdr)) {
+ GC_adjust_num_unmapped(hbp, hhdr);
GC_remap((ptr_t)hbp, (size_t)hhdr->hb_sz);
hhdr -> hb_flags &= ~WAS_UNMAPPED;
}
@@ -791,6 +880,7 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, int may_split)
if( size_avail >= size_needed ) {
# ifdef USE_MUNMAP
if (!IS_MAPPED(hhdr)) {
+ GC_adjust_num_unmapped(hbp, hhdr);
GC_remap((ptr_t)hbp, (size_t)hhdr->hb_sz);
hhdr -> hb_flags &= ~WAS_UNMAPPED;
/* Note: This may leave adjacent, mapped free blocks. */