path: root/thread_local_alloc.c
author     hboehm <hboehm>                 2007-08-04 06:26:29 +0000
committer  Ivan Maidanski <ivmai@mail.ru>  2011-07-26 21:06:41 +0400
commit     87b2e2602e766e448e7d65d7a7a7eb3e3cd020cb (patch)
tree       0162d9d9de90bf1acb3653e4876d39b0e52d6439 /thread_local_alloc.c
parent     6db625e8a2a7d5d8b4faf8747b5688cd64f3949e (diff)
download   bdwgc-87b2e2602e766e448e7d65d7a7a7eb3e3cd020cb.tar.gz
2007-08-03 Hans Boehm <Hans.Boehm@hp.com>
* alloc.c, backgraph.c, headers.c, include/private/gc_priv.h: Maintain GC_our_memory and GC_n_memory.
* dbg_mlc.c (GC_print_smashed_obj): Improve message.
(GC_print_all_smashed_proc): Pass client object address instead of base.
* dyn_load.c (sort_heap_sects): New.
(GC_register_map_entries): Register sections that are contiguous and merged with our heap.
* malloc.c, os_dep.c (GC_text_mapping): Check for just base name of libraries.
* malloc.c (calloc): Check for special callers even with USE_PROC_FOR_LIBRARIES. Move assertion. Add rudimentary malloc/free tracing.
* misc.c: No longer call GC_init_lib_bounds explicitly.
* thread_local_alloc.c (GC_malloc, GC_malloc_atomic): Always initialize on demand.
* tests/test.c: Call GC_INIT only when required.
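As context for the thread_local_alloc.c entry above (not part of the commit): with GC_malloc and GC_malloc_atomic now initializing the collector on demand, a program's first allocation can come before any explicit GC_INIT() call, at least on platforms where GC_INIT is not otherwise required for root registration. A minimal sketch follows; GC_MALLOC/GC_MALLOC_ATOMIC and gc.h are the standard public API, the rest is purely illustrative.

/* Illustrative only, not from this patch. */
#include <stdio.h>
#include <gc.h>   /* assumes the usual installed gc.h header */

int main(void)
{
    /* No GC_INIT() yet: the first allocation initializes the collector */
    /* on demand.                                                       */
    void *p = GC_MALLOC(100);          /* pointer-containing object */
    void *q = GC_MALLOC_ATOMIC(100);   /* pointer-free object       */

    printf("p = %p, q = %p\n", p, q);
    return 0;
}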
Diffstat (limited to 'thread_local_alloc.c')
-rw-r--r--  thread_local_alloc.c  35
1 file changed, 28 insertions, 7 deletions
diff --git a/thread_local_alloc.c b/thread_local_alloc.c
index f747b355..fa1499e2 100644
--- a/thread_local_alloc.c
+++ b/thread_local_alloc.c
@@ -73,11 +73,11 @@ static void return_freelists(void **fl, void **gfl)
/* Each thread structure must be initialized. */
/* This call must be made from the new thread. */
-/* Caller holds allocation lock. */
void GC_init_thread_local(GC_tlfs p)
{
int i;
+ GC_ASSERT(I_HOLD_LOCK());
if (!keys_initialized) {
if (0 != GC_key_create(&GC_thread_key, 0)) {
ABORT("Failed to create key for local allocator");
@@ -141,7 +141,7 @@ void * GC_malloc(size_t bytes)
void *result;
void **tiny_fl;
-# if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
+# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
GC_key_t k = GC_thread_key;
if (EXPECT(0 == k, 0)) {
/* We haven't yet run GC_init_parallel. That means */
@@ -150,14 +150,14 @@ void * GC_malloc(size_t bytes)
}
tsd = GC_getspecific(k);
# else
- GC_ASSERT(GC_is_initialized);
tsd = GC_getspecific(GC_thread_key);
# endif
-# if defined(REDIRECT_MALLOC) && defined(USE_PTHREAD_SPECIFIC)
- if (EXPECT(NULL == tsd, 0)) {
+# if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
+ if (EXPECT(0 == tsd, 0)) {
return GC_core_malloc(bytes);
}
# endif
+ GC_ASSERT(GC_is_initialized);
# ifdef GC_ASSERTIONS
/* We can't check tsd correctly, since we don't have access to */
/* the right declarations. But we can check that it's close. */
@@ -175,18 +175,37 @@ void * GC_malloc(size_t bytes)
tiny_fl = ((GC_tlfs)tsd) -> normal_freelists;
GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
+# ifdef LOG_ALLOCS
+ GC_err_printf("GC_malloc(%d) = %p : %d\n", bytes, result, GC_gc_no);
+# endif
return result;
}
void * GC_malloc_atomic(size_t bytes)
{
size_t granules = ROUNDED_UP_GRANULES(bytes);
+ void *tsd;
void *result;
void **tiny_fl;
+# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
+ GC_key_t k = GC_thread_key;
+ if (EXPECT(0 == k, 0)) {
+ /* We haven't yet run GC_init_parallel. That means */
+ /* we also aren't locking, so this is fairly cheap. */
+ return GC_core_malloc(bytes);
+ }
+ tsd = GC_getspecific(k);
+# else
+ tsd = GC_getspecific(GC_thread_key);
+# endif
+# if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
+ if (EXPECT(0 == tsd, 0)) {
+ return GC_core_malloc(bytes);
+ }
+# endif
GC_ASSERT(GC_is_initialized);
- tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))
- -> ptrfree_freelists;
+ tiny_fl = ((GC_tlfs)tsd) -> ptrfree_freelists;
GC_FAST_MALLOC_GRANS(result, bytes, tiny_fl, DIRECT_GRANULES,
PTRFREE, GC_core_malloc_atomic(bytes), 0/* no init */);
return result;
@@ -220,6 +239,8 @@ extern int GC_gcj_kind;
/* are not necessarily free. And there may be cache fill order issues. */
/* For now, we punt with incremental GC. This probably means that */
/* incremental GC should be enabled before we fork a second thread. */
+/* Unlike the other thread local allocation calls, we assume that the */
+/* collector has been explicitly initialized. */
void * GC_gcj_malloc(size_t bytes,
void * ptr_to_struct_containing_descr)
{
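Aside (not part of the patch): the comment block above spells out the intended ordering for clients, namely initialize the collector explicitly and enable incremental collection before creating further threads, since GC_gcj_malloc, unlike the other thread-local allocation calls, assumes the collector is already initialized. A minimal sketch under those assumptions; GC_INIT, GC_enable_incremental, GC_MALLOC and the pthread_create redirection via GC_THREADS are the standard gc.h API, everything else is illustrative.

/* Illustrative only, not from this patch. */
#define GC_THREADS          /* must precede gc.h so pthread_create is redirected */
#include <gc.h>
#include <pthread.h>

static void *worker(void *arg)
{
    (void)arg;
    /* This thread was registered via the redirected pthread_create, so */
    /* its thread-local freelists are set up before this allocation.    */
    return GC_MALLOC(64);
}

int main(void)
{
    pthread_t t;

    GC_INIT();                 /* explicit init, as GC_gcj_malloc assumes        */
    GC_enable_incremental();   /* enable incremental GC before starting threads  */
    if (pthread_create(&t, NULL, worker, NULL) != 0)
        return 1;
    pthread_join(t, NULL);
    return 0;
}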