summaryrefslogtreecommitdiff
path: root/malloc.c
diff options
context:
space:
mode:
author	Ivan Maidanski <ivmai@mail.ru>	2017-10-25 00:06:22 +0300
committer	Ivan Maidanski <ivmai@mail.ru>	2017-10-25 00:06:22 +0300
commit	3070f67129d9ef8854592b98b9bd41919930c78b (patch)
tree	a7d41dd411eb87b082387a9a05f8e31cdce06494 /malloc.c
parent	184b5a8911c904f04159d6f75033bdc10c63d13f (diff)
download	bdwgc-3070f67129d9ef8854592b98b9bd41919930c78b.tar.gz
Make extend_size_map() static
(code refactoring)
* include/private/gc_priv.h (GC_extend_size_map): Remove prototype.
* malloc.c (GC_extend_size_map): Move the function definition from misc.c; change GC_INNER to STATIC; reformat code; remove j and much_smaller_than_i local variables; add assertions that I_HOLD_LOCK and GC_size_map[i]==0; remove comment that GC_size_map[i] is expected to be zero.
* malloc.c (GC_generic_malloc_inner): Capitalize the first letter of the description comment.
* misc.c (GC_extend_size_map): Remove the definition.
Diffstat (limited to 'malloc.c')
-rw-r--r--	malloc.c	55
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/malloc.c b/malloc.c
index dbffee27..cd9468a7 100644
--- a/malloc.c
+++ b/malloc.c
@@ -102,7 +102,60 @@ STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
return result;
}
-/* allocate lb bytes for an object of kind k. */
+/* Fill in additional entries in GC_size_map, including the i-th one. */
+/* Note that a filled in section of the array ending at n always */
+/* has the length of at least n/4. */
+STATIC void GC_extend_size_map(size_t i)
+{
+ size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
+ size_t granule_sz;
+ size_t byte_sz = GRANULES_TO_BYTES(orig_granule_sz);
+ /* The size we try to preserve. */
+ /* Close to i, unless this would */
+ /* introduce too many distinct sizes. */
+ size_t smaller_than_i = byte_sz - (byte_sz >> 3);
+ size_t low_limit; /* The lowest indexed entry we initialize. */
+ size_t number_of_objs;
+
+ GC_ASSERT(I_HOLD_LOCK());
+ GC_ASSERT(0 == GC_size_map[i]);
+ if (0 == GC_size_map[smaller_than_i]) {
+ low_limit = byte_sz - (byte_sz >> 2); /* much smaller than i */
+ granule_sz = orig_granule_sz;
+ while (GC_size_map[low_limit] != 0)
+ low_limit++;
+ } else {
+ low_limit = smaller_than_i + 1;
+ while (GC_size_map[low_limit] != 0)
+ low_limit++;
+
+ granule_sz = ROUNDED_UP_GRANULES(low_limit);
+ granule_sz += granule_sz >> 3;
+ if (granule_sz < orig_granule_sz)
+ granule_sz = orig_granule_sz;
+ }
+
+ /* For these larger sizes, we use an even number of granules. */
+ /* This makes it easier to, e.g., construct a 16-byte-aligned */
+ /* allocator even if GRANULE_BYTES is 8. */
+ granule_sz = (granule_sz + 1) & ~1;
+ if (granule_sz > MAXOBJGRANULES)
+ granule_sz = MAXOBJGRANULES;
+
+ /* If we can fit the same number of larger objects in a block, do so. */
+ number_of_objs = HBLK_GRANULES / granule_sz;
+ GC_ASSERT(number_of_objs != 0);
+ granule_sz = (HBLK_GRANULES / number_of_objs) & ~1;
+
+ byte_sz = GRANULES_TO_BYTES(granule_sz) - EXTRA_BYTES;
+ /* We may need one extra byte; do not always */
+ /* fill in GC_size_map[byte_sz]. */
+
+ for (; low_limit <= byte_sz; low_limit++)
+ GC_size_map[low_limit] = granule_sz;
+}
+
+/* Allocate lb bytes for an object of kind k. */
/* Should not be used to directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */