summary refs log tree commit diff
diff options
context:
space:
mode:
authorIvan Maidanski <ivmai@mail.ru>2014-03-30 11:24:12 +0400
committerIvan Maidanski <ivmai@mail.ru>2014-03-30 11:24:12 +0400
commit5a4bcb2aa28f7af312922b1ecbb2f729c94caab1 (patch)
tree9a8ca423774aef14a795fdd8c68c00db86cc9b0d
parentff6c3d9fb8e7065ffe74cb780b2c1f54868152e2 (diff)
downloadbdwgc-5a4bcb2aa28f7af312922b1ecbb2f729c94caab1.tar.gz
Define ROUNDUP_PAGESIZE, ROUNDUP_GRANULE_SIZE macros (code refactoring)
* alloc.c (GC_expand_hp_inner): Use ROUNDUP_PAGESIZE().
* checksums.c (GC_record_fault, GC_was_faulted): Likewise.
* os_dep.c (GC_unix_mmap_get_mem, GC_wince_get_mem, GC_unmap_start,
GC_remove_protection): Likewise.
* headers.c (GC_scratch_alloc): Use ROUNDUP_GRANULE_SIZE().
* malloc.c (GC_alloc_large): Likewise.
* mallocx.c (GC_malloc_many): Likewise.
* headers.c (GC_scratch_alloc): Use ROUNDUP_PAGESIZE() (only if USE_MMAP).
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDUP_PAGESIZE): Define
macro to round up a value to a multiple of a granule or a page, respectively.
-rw-r--r--alloc.c9
-rw-r--r--checksums.c8
-rw-r--r--headers.c9
-rw-r--r--include/private/gc_priv.h10
-rw-r--r--malloc.c3
-rw-r--r--mallocx.c4
-rw-r--r--os_dep.c14
7 files changed, 23 insertions, 34 deletions
diff --git a/alloc.c b/alloc.c
index be6d4ed3..25bb31ea 100644
--- a/alloc.c
+++ b/alloc.c
@@ -1169,14 +1169,7 @@ GC_INNER GC_bool GC_expand_hp_inner(word n)
/* heap to expand soon. */
if (n < MINHINCR) n = MINHINCR;
- bytes = n * HBLKSIZE;
- /* Make sure bytes is a multiple of GC_page_size */
- {
- word mask = GC_page_size - 1;
- bytes += mask;
- bytes &= ~mask;
- }
-
+ bytes = ROUNDUP_PAGESIZE(n * HBLKSIZE);
if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
/* Exceeded self-imposed limit */
return(FALSE);
diff --git a/checksums.c b/checksums.c
index f5ad843a..f0f902a3 100644
--- a/checksums.c
+++ b/checksums.c
@@ -41,10 +41,8 @@ STATIC size_t GC_n_faulted = 0;
void GC_record_fault(struct hblk * h)
{
- word page = (word)h;
+ word page = ROUNDUP_PAGESIZE((word)h);
- page += GC_page_size - 1;
- page &= ~(GC_page_size - 1);
if (GC_n_faulted >= NSUMS) ABORT("write fault log overflowed");
GC_faulted[GC_n_faulted++] = page;
}
@@ -52,10 +50,8 @@ void GC_record_fault(struct hblk * h)
STATIC GC_bool GC_was_faulted(struct hblk *h)
{
size_t i;
- word page = (word)h;
+ word page = ROUNDUP_PAGESIZE((word)h);
- page += GC_page_size - 1;
- page &= ~(GC_page_size - 1);
for (i = 0; i < GC_n_faulted; ++i) {
if (GC_faulted[i] == page) return TRUE;
}
diff --git a/headers.c b/headers.c
index 8e92e384..69f15751 100644
--- a/headers.c
+++ b/headers.c
@@ -119,8 +119,7 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
{
register ptr_t result = scratch_free_ptr;
- bytes += GRANULE_BYTES-1;
- bytes &= ~(GRANULE_BYTES-1);
+ bytes = ROUNDUP_GRANULE_SIZE(bytes);
scratch_free_ptr += bytes;
if ((word)scratch_free_ptr <= (word)GC_scratch_end_ptr) {
return(result);
@@ -132,8 +131,7 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
/* Undo the damage, and get memory directly */
bytes_to_get = bytes;
# ifdef USE_MMAP
- bytes_to_get += GC_page_size - 1;
- bytes_to_get &= ~(GC_page_size - 1);
+ bytes_to_get = ROUNDUP_PAGESIZE(bytes_to_get);
# endif
result = (ptr_t)GET_MEM(bytes_to_get);
GC_add_to_our_memory(result, bytes_to_get);
@@ -148,8 +146,7 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
scratch_free_ptr -= bytes;
bytes_to_get = bytes;
# ifdef USE_MMAP
- bytes_to_get += GC_page_size - 1;
- bytes_to_get &= ~(GC_page_size - 1);
+ bytes_to_get = ROUNDUP_PAGESIZE(bytes_to_get);
# endif
result = (ptr_t)GET_MEM(bytes_to_get);
GC_add_to_our_memory(result, bytes_to_get);
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index 7903e463..fe77e3a2 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -763,6 +763,10 @@ GC_EXTERN GC_warn_proc GC_current_warn_proc;
# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
+/* Round up allocation size (in bytes) to a multiple of a granule. */
+#define ROUNDUP_GRANULE_SIZE(bytes) \
+ (((bytes) + (GRANULE_BYTES - 1)) & ~(GRANULE_BYTES - 1))
+
/* Round up byte allocation requests to integral number of words, etc. */
# define ROUNDED_UP_GRANULES(n) \
BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
@@ -1398,13 +1402,17 @@ GC_EXTERN word GC_n_heap_sects; /* Number of separately added heap */
GC_EXTERN word GC_page_size;
+/* Round up allocation size to a multiple of a page size. */
+/* GC_setpagesize() is assumed to be already invoked. */
+#define ROUNDUP_PAGESIZE(bytes) \
+ (((bytes) + GC_page_size - 1) & ~(GC_page_size - 1))
+
#if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
struct _SYSTEM_INFO;
GC_EXTERN struct _SYSTEM_INFO GC_sysinfo;
GC_INNER GC_bool GC_is_heap_base(ptr_t p);
#endif
-
GC_EXTERN word GC_black_list_spacing;
/* Average number of bytes between blacklisted */
/* blocks. Approximate. */
diff --git a/malloc.c b/malloc.c
index 836c916d..58aa55ab 100644
--- a/malloc.c
+++ b/malloc.c
@@ -46,8 +46,7 @@ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
ptr_t result;
GC_bool retry = FALSE;
- /* Round up to a multiple of a granule. */
- lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
+ lb = ROUNDUP_GRANULE_SIZE(lb);
n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
/* Do our share of marking work */
diff --git a/mallocx.c b/mallocx.c
index 75ee0e28..6cbcf6c1 100644
--- a/mallocx.c
+++ b/mallocx.c
@@ -445,8 +445,8 @@ GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
GC_API void * GC_CALL GC_malloc_many(size_t lb)
{
void *result;
- GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1)
- & ~(GRANULE_BYTES-1),
+
+ GC_generic_malloc_many(ROUNDUP_GRANULE_SIZE(lb + EXTRA_BYTES),
NORMAL, &result);
return result;
}
diff --git a/os_dep.c b/os_dep.c
index 7fbeda3e..e19fcc06 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -2069,8 +2069,7 @@ STATIC ptr_t GC_unix_mmap_get_mem(word bytes)
# undef IGNORE_PAGES_EXECUTABLE
if (result == MAP_FAILED) return(0);
- last_addr = (ptr_t)result + bytes + GC_page_size - 1;
- last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
+ last_addr = (ptr_t)ROUNDUP_PAGESIZE((word)result + bytes);
# if !defined(LINUX)
if (last_addr == 0) {
/* Oops. We got the end of the address space. This isn't */
@@ -2190,8 +2189,7 @@ void * os2_alloc(size_t bytes)
ptr_t result = 0; /* initialized to prevent warning. */
word i;
- /* Round up allocation size to multiple of page size */
- bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
+ bytes = ROUNDUP_PAGESIZE(bytes);
/* Try to find reserved, uncommitted pages */
for (i = 0; i < GC_n_heap_bases; i++) {
@@ -2366,9 +2364,8 @@ void * os2_alloc(size_t bytes)
/* Return 0 if the block is too small to make this feasible. */
STATIC ptr_t GC_unmap_start(ptr_t start, size_t bytes)
{
- ptr_t result;
- /* Round start to next page boundary. */
- result = (ptr_t)((word)(start + GC_page_size - 1) & ~(GC_page_size - 1));
+ ptr_t result = (ptr_t)ROUNDUP_PAGESIZE((word)start);
+
if ((word)(result + GC_page_size) > (word)(start + bytes)) return 0;
return result;
}
@@ -3269,8 +3266,7 @@ GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
# endif
if (!GC_dirty_maintained) return;
h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
- h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
- & ~(GC_page_size-1));
+ h_end = (struct hblk *)ROUNDUP_PAGESIZE((word)(h + nblocks));
if (h_end == h_trunc + 1 &&
get_pht_entry_from_index(GC_dirty_pages, PHT_HASH(h_trunc))) {
/* already marked dirty, and hence unprotected. */