summaryrefslogtreecommitdiff
path: root/thread_local_alloc.c
diff options
context:
space:
mode:
author: Alessandro Bruni <alessandro.bruni@gmail.com> 2016-01-28 00:03:49 +0300
committer: Ivan Maidanski <ivmai@mail.ru> 2016-01-28 00:03:49 +0300
commit: 989056833ff24691cc26c8bc8b9ba951a08b4a66 (patch)
tree: 9c8309a5b911a2a7cae1580aeb35e543203c49c3 /thread_local_alloc.c
parent: 283e7fded73a8428f94fa0e0baa24e5ed2a1f78b (diff)
download: bdwgc-989056833ff24691cc26c8bc8b9ba951a08b4a66.tar.gz
GC_malloc[_atomic] global and thread-local generalization with kind
* include/gc_inline.h (GC_malloc_kind, GC_malloc_kind_global): New public function declaration. * include/gc_inline.h (GC_MALLOC_WORDS_KIND): New public macro. * include/gc_inline.h (GC_MALLOC_WORDS, GC_MALLOC_ATOMIC_WORDS): Use GC_MALLOC_WORDS_KIND. * include/gc_inline.h (GC_CONS): Use GC_malloc_kind (instead of GC_malloc); reformat code. * include/private/gc_priv.h (MAXOBJKINDS): Allow user-defined values. * include/private/gc_priv.h (GC_core_malloc, GC_core_malloc_atomic): Remove prototype. * malloc.c: Include gc_inline.h (to get GC_malloc_kind prototype). * mallocx.c: Likewise. * malloc.c (GC_generic_malloc_inner, GC_generic_malloc_inner_ignore_off_page, GC_generic_malloc): Add assertion on "k" (kind) argument (should be less than MAXOBJKINDS). * mallocx.c (GC_generic_malloc_ignore_off_page, GC_generic_malloc_many): Likewise. * malloc.c (GC_generic_malloc_uncollectable): Add assertion on "k" argument (should be less than PREDEFINED_KINDS). * malloc.c (GC_core_malloc_atomic, GC_core_malloc): Replace with GC_malloc_kind_global. * malloc.c (GC_malloc_atomic, GC_malloc): Define as a wrapper around GC_malloc_kind_global. * malloc.c (GC_malloc_kind): Redirect to GC_malloc_kind_global if not defined in gc_inline.h (as a macro) or in thread_local_alloc.c. * mallocx.c (GC_generic_or_special_malloc): Call GC_malloc_kind instead of GC_malloc_atomic and GC_malloc. * thread_local_alloc.c (GC_malloc, GC_malloc_atomic): Replace with GC_malloc_kind; remove tiny_fl local variable; call GC_malloc_kind_global instead of GC_core_malloc and GC_core_malloc_atomic. * thread_local_alloc.c (GC_destroy_thread_local): Adjust static assert to guard against global _freelists overrun.
Diffstat (limited to 'thread_local_alloc.c')
-rw-r--r-- thread_local_alloc.c | 62
1 file changed, 19 insertions, 43 deletions
diff --git a/thread_local_alloc.c b/thread_local_alloc.c
index 28b8620c..9b194a78 100644
--- a/thread_local_alloc.c
+++ b/thread_local_alloc.c
@@ -138,7 +138,7 @@ GC_INNER void GC_destroy_thread_local(GC_tlfs p)
/* We currently only do this from the thread itself or from */
/* the fork handler for a child process. */
- GC_STATIC_ASSERT(MAXOBJKINDS >= THREAD_FREELISTS_KINDS);
+ GC_STATIC_ASSERT(PREDEFINED_KINDS >= THREAD_FREELISTS_KINDS);
for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
return_freelists(p -> _freelists[i], GC_freelists[i]);
}
@@ -156,19 +156,23 @@ GC_INNER void GC_destroy_thread_local(GC_tlfs p)
GC_bool GC_is_thread_tsd_valid(void *tsd);
#endif
-GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t bytes)
+GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int knd)
{
- size_t granules = ROUNDED_UP_GRANULES(bytes);
+ size_t granules;
void *tsd;
void *result;
- void **tiny_fl;
+# if PREDEFINED_KINDS > THREAD_FREELISTS_KINDS
+ if (EXPECT(knd >= THREAD_FREELISTS_KINDS, FALSE)) {
+ return GC_malloc_kind_global(bytes, knd);
+ }
+# endif
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
GC_key_t k = GC_thread_key;
if (EXPECT(0 == k, FALSE)) {
/* We haven't yet run GC_init_parallel. That means */
/* we also aren't locking, so this is fairly cheap. */
- return GC_core_malloc(bytes);
+ return GC_malloc_kind_global(bytes, knd);
}
tsd = GC_getspecific(k);
# else
@@ -176,53 +180,25 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t bytes)
# endif
# if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
if (EXPECT(0 == tsd, FALSE)) {
- return GC_core_malloc(bytes);
+ return GC_malloc_kind_global(bytes, knd);
}
# endif
GC_ASSERT(GC_is_initialized);
-
GC_ASSERT(GC_is_thread_tsd_valid(tsd));
-
- tiny_fl = ((GC_tlfs)tsd) -> normal_freelists;
- GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
- NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
+ granules = ROUNDED_UP_GRANULES(bytes);
+ GC_FAST_MALLOC_GRANS(result, granules,
+ ((GC_tlfs)tsd) -> _freelists[knd], DIRECT_GRANULES,
+ knd, GC_malloc_kind_global(bytes, knd),
+ (void)(knd == PTRFREE ? NULL
+ : (obj_link(result) = 0)));
# ifdef LOG_ALLOCS
- GC_log_printf("GC_malloc(%lu) returned %p, recent GC #%lu\n",
- (unsigned long)bytes, result, (unsigned long)GC_gc_no);
+ GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
+ (unsigned long)bytes, knd, result,
+ (unsigned long)GC_gc_no);
# endif
return result;
}
-GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t bytes)
-{
- size_t granules = ROUNDED_UP_GRANULES(bytes);
- void *tsd;
- void *result;
- void **tiny_fl;
-
-# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
- GC_key_t k = GC_thread_key;
- if (EXPECT(0 == k, FALSE)) {
- /* We haven't yet run GC_init_parallel. That means */
- /* we also aren't locking, so this is fairly cheap. */
- return GC_core_malloc_atomic(bytes);
- }
- tsd = GC_getspecific(k);
-# else
- tsd = GC_getspecific(GC_thread_key);
-# endif
-# if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
- if (EXPECT(0 == tsd, FALSE)) {
- return GC_core_malloc_atomic(bytes);
- }
-# endif
- GC_ASSERT(GC_is_initialized);
- tiny_fl = ((GC_tlfs)tsd) -> ptrfree_freelists;
- GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES, PTRFREE,
- GC_core_malloc_atomic(bytes), (void)0 /* no init */);
- return result;
-}
-
#ifdef GC_GCJ_SUPPORT
# include "atomic_ops.h" /* for AO_compiler_barrier() */