author    Gabriel Scherer <gabriel.scherer@gmail.com>  2023-04-08 22:24:08 +0200
committer Gabriel Scherer <gabriel.scherer@gmail.com>  2023-04-08 22:24:08 +0200
commit    d551b3d6ffb1e3a3edbd3ff2aacee8b41442eebd
tree      2af0aa164f49eb1669bdb4c91b6afa2411864986
parent    3339f05cbbf26728073ae7723a38512849bc7b2e
download  ocaml-d551b3d6ffb1e3a3edbd3ff2aacee8b41442eebd.tar.gz
major_gc.c: rename PAGE into CHUNK to avoid the PAGE_MASK macro
Issue #12101 reports a build warning on Android, where a macro named PAGE_MASK is already defined by the system. We rename our macro to CHUNK_MASK and add some documentation of the representation of the compressed mark stack.
-rw-r--r--  runtime/major_gc.c | 44
1 file changed, 25 insertions(+), 19 deletions(-)
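
The patch below documents the compressed mark stack as a hashtable of per-chunk bitsets. As a reading aid, here is a minimal standalone C sketch of that encoding. It reuses the CHUNK_MASK / PTR_TO_CHUNK macros introduced by the patch, but the uintnat/value typedefs and the toy heap array are assumptions made only so the example compiles on its own; it is not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t uintnat;   /* stand-in for the runtime's uintnat */
typedef uintnat value;       /* stand-in for the runtime's value type */
#define BITS_PER_WORD (8 * sizeof(uintnat))

/* Same encoding as the macros introduced by the patch below. */
#define CHUNK_MASK (~(uintnat)(BITS_PER_WORD - 1))
#define PTR_TO_CHUNK(v) (((uintnat)(v) / sizeof(value)) & CHUNK_MASK)
#define PTR_TO_CHUNK_OFFSET(v) (((uintnat)(v) / sizeof(value)) & ~CHUNK_MASK)
#define CHUNK_AND_OFFSET_TO_PTR(chunk, offset) \
  ((value*)(((chunk) + (offset)) * sizeof(value)))

int main(void) {
  value heap[128] = {0};     /* toy heap standing in for OCaml heap words */
  value *p = &heap[5];

  /* A field address splits into an aligned chunk of BITS_PER_WORD words
     plus an offset inside that chunk; the offset selects one bit of the
     per-chunk bitset stored in the addrmap. */
  uintnat chunk  = PTR_TO_CHUNK(p);
  uintnat offset = PTR_TO_CHUNK_OFFSET(p);
  uintnat bitset = (uintnat)1 << offset;

  /* Decoding a set bit recovers the original field address. */
  assert(CHUNK_AND_OFFSET_TO_PTR(chunk, offset) == p);
  printf("chunk=%#lx offset=%lu bitset=%#lx\n",
         (unsigned long)chunk, (unsigned long)offset, (unsigned long)bitset);
  return 0;
}
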
diff --git a/runtime/major_gc.c b/runtime/major_gc.c
index 52506c86c1..7eb7ae802a 100644
--- a/runtime/major_gc.c
+++ b/runtime/major_gc.c
@@ -44,14 +44,14 @@
#define MARK_STACK_INIT_SIZE (1 << 12)
/* The mark stack consists of two parts:
- 1. the stack - consisting of spans of fields that need to be marked, and
- 2. the compressed stack - consisting of entries (k, bitfield)
- where the bitfield represents word offsets from k that need to
- be marked.
+ 1. the stack - a dynamic array of spans of fields that need to be marked, and
+ 2. the compressed stack - a bitset of fields that need to be marked.
The stack is bounded relative to the heap size. When the stack
overflows the bound, then entries from the stack are compressed and
- transferred into the compressed stack.
+ transferred into the compressed stack, except for "large" entries,
+ spans of more than BITS_PER_WORD fields, which are more compactly
+ represented as spans and remain on the uncompressed stack.
When the stack is empty, the compressed stack is processed.
The compressed stack iterator marks the point up to which
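
The comment above says that, on overflow, entries are compressed except for "large" spans. Below is a short sketch of that policy under the assumption of an illustrative mark_span type and function name that are not the runtime's own: a span wider than BITS_PER_WORD fields would cost at least one bitset word anyway, so it is cheaper to keep it as a span.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t uintnat;
typedef uintnat value;
#define BITS_PER_WORD (8 * sizeof(uintnat))

struct mark_span { value *start; value *end; };  /* fields [start, end) still to mark */

/* Overflow policy sketch: small spans go to the compressed (bitset) stack,
   large spans stay on the uncompressed stack. */
static int should_compress(struct mark_span s) {
  uintnat len = (uintnat)(s.end - s.start);
  return len <= BITS_PER_WORD;
}

int main(void) {
  value heap[256] = {0};
  struct mark_span small = { &heap[0], &heap[8] };
  struct mark_span large = { &heap[0], &heap[200] };
  printf("small span: %s\n", should_compress(small) ? "compress" : "keep as span");
  printf("large span: %s\n", should_compress(large) ? "compress" : "keep as span");
  return 0;
}
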
@@ -939,10 +939,17 @@ again:
return budget;
}
-/* compressed mark stack */
-#define PAGE_MASK (~(uintnat)(BITS_PER_WORD-1))
-#define PTR_TO_PAGE(v) (((uintnat)(v)/sizeof(value)) & PAGE_MASK)
-#define PTR_TO_PAGE_OFFSET(v) ((((uintnat)(v)/sizeof(value)) & ~PAGE_MASK))
+/* Compressed mark stack
+
+ We use a bitset, implemented as a hashtable storing word-sized
+ integers (uintnat). Each integer represents a "chunk" of addresses
+ that may or may not be present in the stack.
+ */
+#define CHUNK_MASK (~(uintnat)(BITS_PER_WORD-1))
+#define PTR_TO_CHUNK(v) (((uintnat)(v)/sizeof(value)) & CHUNK_MASK)
+#define PTR_TO_CHUNK_OFFSET(v) ((((uintnat)(v)/sizeof(value)) & ~CHUNK_MASK))
+#define CHUNK_AND_OFFSET_TO_PTR(chunk, offset) \
+ ((value*) ((chunk + offset) * sizeof(value)))
/* mark until the budget runs out or marking is done */
static intnat mark(intnat budget) {
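
The hunks that follow iterate over the compressed stack in mark() and insert into it in add_addr(). The sketch below imitates both with a toy fixed-size table standing in for caml_addrmap; the table, its size, and the toy_* names are assumptions for illustration only, not the runtime's API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t uintnat;
typedef uintnat value;
#define BITS_PER_WORD (8 * sizeof(uintnat))

#define CHUNK_MASK (~(uintnat)(BITS_PER_WORD - 1))
#define PTR_TO_CHUNK(v) (((uintnat)(v) / sizeof(value)) & CHUNK_MASK)
#define PTR_TO_CHUNK_OFFSET(v) (((uintnat)(v) / sizeof(value)) & ~CHUNK_MASK)
#define CHUNK_AND_OFFSET_TO_PTR(chunk, offset) \
  ((value*)(((chunk) + (offset)) * sizeof(value)))

/* Toy stand-in for caml_addrmap: a few (chunk, bitset) slots. */
struct slot { int used; uintnat chunk; uintnat bits; };
static struct slot table[16];

/* Analogue of add_addr: record one field address, returning 1 when the
   chunk was not present in the table before. */
static int toy_add_addr(value *ptr) {
  uintnat chunk = PTR_TO_CHUNK(ptr);
  uintnat flag = (uintnat)1 << PTR_TO_CHUNK_OFFSET(ptr);
  for (int i = 0; i < 16; i++) {
    if (table[i].used && table[i].chunk == chunk) { table[i].bits |= flag; return 0; }
    if (!table[i].used) { table[i].used = 1; table[i].chunk = chunk; table[i].bits = flag; return 1; }
  }
  assert(0 && "toy table full");
  return 0;
}

int main(void) {
  value heap[128] = {0};
  toy_add_addr(&heap[3]);
  toy_add_addr(&heap[3]);   /* duplicate address: same bit, no new entry */
  toy_add_addr(&heap[70]);  /* 67 words away, so necessarily a different chunk */

  /* Analogue of the loop in mark(): walk every set bit of every chunk and
     recover the field address it encodes. */
  for (int i = 0; i < 16; i++) {
    if (!table[i].used) continue;
    for (unsigned ofs = 0; ofs < BITS_PER_WORD; ofs++)
      if (table[i].bits & ((uintnat)1 << ofs))
        printf("would mark field at %p\n",
               (void*)CHUNK_AND_OFFSET_TO_PTR(table[i].chunk, ofs));
  }
  return 0;
}
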
@@ -950,12 +957,11 @@ static intnat mark(intnat budget) {
while (budget > 0 && !domain_state->marking_done) {
budget = do_some_marking(domain_state->mark_stack, budget);
if (budget > 0) {
- int i;
struct mark_stack* mstk = domain_state->mark_stack;
addrmap_iterator it = mstk->compressed_stack_iter;
if (caml_addrmap_iter_ok(&mstk->compressed_stack, it)) {
- uintnat k = caml_addrmap_iter_key(&mstk->compressed_stack, it);
- value v = caml_addrmap_iter_value(&mstk->compressed_stack, it);
+ uintnat chunk = caml_addrmap_iter_key(&mstk->compressed_stack, it);
+ uintnat bitset = caml_addrmap_iter_value(&mstk->compressed_stack, it);
/* NB: must update the iterator here, as possible that
mark_slice_darken could lead to the mark stack being pruned
@@ -963,9 +969,9 @@ static intnat mark(intnat budget) {
mstk->compressed_stack_iter =
caml_addrmap_next(&mstk->compressed_stack, it);
- for(i=0; i<BITS_PER_WORD; i++) {
- if(v & ((uintnat)1 << i)) {
- value* p = (value*)((k + i)*sizeof(value));
+ for(int ofs=0; ofs<BITS_PER_WORD; ofs++) {
+ if(bitset & ((uintnat)1 << ofs)) {
+ value* p = CHUNK_AND_OFFSET_TO_PTR(chunk, ofs);
mark_slice_darken(domain_state->mark_stack, *p, &budget);
}
}
@@ -1757,18 +1763,18 @@ void caml_finish_sweeping (void)
}
Caml_inline int add_addr(struct addrmap* amap, value* ptr) {
- uintnat k = PTR_TO_PAGE(ptr);
- uintnat flag = (uintnat)1 << PTR_TO_PAGE_OFFSET(ptr);
+ uintnat chunk = PTR_TO_CHUNK(ptr);
+ uintnat flag = (uintnat)1 << PTR_TO_CHUNK_OFFSET(ptr);
int new_entry = 0;
- value* amap_pos = caml_addrmap_insert_pos(amap, k);
+ value* amap_pos = caml_addrmap_insert_pos(amap, chunk);
if (*amap_pos == ADDRMAP_NOT_PRESENT) {
new_entry = 1;
*amap_pos = 0;
}
- CAMLassert(ptr == (value*)((k + PTR_TO_PAGE_OFFSET(ptr))*sizeof(value)));
+ CAMLassert(ptr == CHUNK_AND_OFFSET_TO_PTR(chunk, PTR_TO_CHUNK_OFFSET(ptr)));
if (!(*amap_pos & flag)) {
*amap_pos |= flag;