summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--darwin_stop_world.c19
-rw-r--r--include/private/pthread_support.h152
-rw-r--r--pthread_stop_world.c70
-rw-r--r--pthread_support.c236
-rw-r--r--win32_threads.c128
5 files changed, 338 insertions, 267 deletions
diff --git a/darwin_stop_world.c b/darwin_stop_world.c
index 35d2aaa5..5debf480 100644
--- a/darwin_stop_world.c
+++ b/darwin_stop_world.c
@@ -142,6 +142,7 @@ STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
mach_port_t my_thread, ptr_t *paltstack_lo,
ptr_t *paltstack_hi, GC_bool *pfound_me)
{
+ GC_stack_context_t crtn = p -> crtn;
ptr_t lo;
if (thread == my_thread) {
@@ -152,9 +153,9 @@ STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
# endif
*pfound_me = TRUE;
} else if (p != NULL && (p -> flags & DO_BLOCKING) != 0) {
- lo = p -> stack_ptr;
+ lo = crtn -> stack_ptr;
# ifndef DARWIN_DONT_PARSE_STACK
- *phi = p -> topOfStack;
+ *phi = crtn -> topOfStack;
# endif
} else {
@@ -326,14 +327,14 @@ STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
UNUSED_ARG(paltstack_hi);
# else
/* p is guaranteed to be non-NULL regardless of GC_query_task_threads. */
- *phi = EXPECT((p->flags & MAIN_THREAD) == 0, TRUE) ? p->stack_end
+ *phi = EXPECT((p -> flags & MAIN_THREAD) == 0, TRUE) ? crtn -> stack_end
: GC_stackbottom;
- if (p->altstack != NULL && (word)p->altstack <= (word)lo
- && (word)lo <= (word)p->altstack + p->altstack_size) {
+ if (crtn -> altstack != NULL && (word)(crtn -> altstack) <= (word)lo
+ && (word)lo <= (word)(crtn -> altstack) + crtn -> altstack_size) {
*paltstack_lo = lo;
- *paltstack_hi = p->altstack + p->altstack_size;
- lo = p->normstack;
- *phi = lo + p->normstack_size;
+ *paltstack_hi = crtn -> altstack + crtn -> altstack_size;
+ lo = crtn -> normstack;
+ *phi = lo + crtn -> normstack_size;
} else
# endif
/* else */ {
@@ -404,7 +405,7 @@ GC_INNER void GC_push_all_stacks(void)
if (lo) {
GC_ASSERT((word)lo <= (word)hi);
total_size += hi - lo;
- GC_push_all_stack_sections(lo, hi, p->traced_stack_sect);
+ GC_push_all_stack_sections(lo, hi, p -> crtn -> traced_stack_sect);
}
if (altstack_lo) {
total_size += altstack_hi - altstack_lo;
diff --git a/include/private/pthread_support.h b/include/private/pthread_support.h
index 8f74bfdf..6c638b2a 100644
--- a/include/private/pthread_support.h
+++ b/include/private/pthread_support.h
@@ -44,6 +44,76 @@
EXTERN_C_BEGIN
+typedef struct GC_StackContext_Rep {
+# if defined(THREAD_SANITIZER) && defined(SIGNAL_BASED_STOP_WORLD)
+ char dummy[sizeof(oh)]; /* A dummy field to avoid TSan false */
+ /* positive about the race between */
+ /* GC_has_other_debug_info and */
+ /* GC_suspend_handler_inner (which */
+ /* sets stack_ptr). */
+# endif
+
+# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
+ volatile
+# endif
+ ptr_t stack_end; /* Cold end of the stack (except for */
+ /* main thread on non-Windows). */
+ /* On Windows: 0 means entry invalid; */
+ /* not in_use implies stack_end is 0. */
+
+ ptr_t stack_ptr; /* Valid only in some platform-specific states. */
+
+# ifdef GC_WIN32_THREADS
+# define ADDR_LIMIT ((ptr_t)GC_WORD_MAX)
+ ptr_t last_stack_min; /* Last known minimum (hottest) address */
+ /* in stack or ADDR_LIMIT if unset. */
+# ifdef I386
+ ptr_t initial_stack_base; /* The cold end of the stack saved by */
+ /* GC_record_stack_base (never modified */
+ /* by GC_set_stackbottom); used for the */
+ /* old way of the coroutines support. */
+# endif
+# elif defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
+ ptr_t topOfStack; /* Result of GC_FindTopOfStack(0); */
+ /* valid only if the thread is blocked; */
+ /* non-NULL value means already set. */
+# endif
+
+# if defined(E2K) || defined(IA64)
+ ptr_t backing_store_end; /* Note: may reference data in GC heap. */
+ ptr_t backing_store_ptr;
+# endif
+
+# ifdef GC_WIN32_THREADS
+ /* For now, alt-stack is not implemented for Win32. */
+# else
+ ptr_t altstack; /* The start of the alt-stack if there */
+ /* is one, NULL otherwise. */
+ word altstack_size; /* The size of the alt-stack if exists. */
+ ptr_t normstack; /* The start and size of the "normal" */
+ /* stack (set by GC_register_altstack). */
+ word normstack_size;
+# endif
+
+# ifndef GC_NO_FINALIZATION
+ unsigned char finalizer_nested;
+ char fnlz_pad[1]; /* Explicit alignment (for some rare */
+ /* compilers such as bcc32 and wcc32). */
+ unsigned short finalizer_skipped;
+ /* Used by GC_check_finalizer_nested() */
+ /* to minimize the level of recursion */
+ /* when a client finalizer allocates */
+ /* memory (initially both are 0). */
+# endif
+
+ struct GC_traced_stack_sect_s *traced_stack_sect;
+ /* Points to the "frame" data held in */
+ /* stack by the innermost */
+ /* GC_call_with_gc_active() of this */
+ /* stack (thread); may be NULL. */
+
+} *GC_stack_context_t;
+
#ifdef GC_WIN32_THREADS
typedef DWORD thread_id_t;
# define thread_id_self() GetCurrentThreadId()
@@ -54,19 +124,7 @@ EXTERN_C_BEGIN
# define THREAD_ID_EQUAL(id1, id2) THREAD_EQUAL(id1, id2)
#endif
-/* We use the allocation lock to protect thread-related data structures. */
-
-/* The set of all known threads. We intercept thread creation and */
-/* join. Protected by the GC lock. */
typedef struct GC_Thread_Rep {
-# ifdef THREAD_SANITIZER
- char dummy[sizeof(oh)]; /* A dummy field to avoid TSan false */
- /* positive about the race between */
- /* GC_has_other_debug_info and */
- /* GC_suspend_handler_inner (which */
- /* sets stack_ptr). */
-# endif
-
union {
# if !defined(GC_NO_THREADS_DISCOVERY) && defined(GC_WIN32_THREADS)
volatile AO_t in_use; /* Updated without lock. We assert */
@@ -89,6 +147,8 @@ typedef struct GC_Thread_Rep {
/* not yet have registered the join.) */
} tm; /* table_management */
+ GC_stack_context_t crtn;
+
thread_id_t id; /* hash table key */
# ifdef GC_DARWIN_THREADS
mach_port_t mach_thread;
@@ -129,10 +189,10 @@ typedef struct GC_Thread_Rep {
# endif
# if (defined(GC_HAVE_PTHREAD_EXIT) || !defined(GC_NO_PTHREAD_CANCEL)) \
&& defined(GC_PTHREADS)
-# define DISABLED_GC 0x10 /* Collections are disabled while the */
+# define DISABLED_GC 0x10 /* Collections are disabled while the */
/* thread is exiting. */
# endif
-# define DO_BLOCKING 0x20 /* Thread is in do-blocking state. */
+# define DO_BLOCKING 0x20 /* Thread is in the do-blocking state. */
/* If set, thread will acquire GC lock */
/* before any pointer manipulation, and */
/* has set its SP value. Thus, it does */
@@ -141,43 +201,9 @@ typedef struct GC_Thread_Rep {
# define IS_SUSPENDED 0x40 /* Thread is suspended by SuspendThread. */
# endif
-# ifndef GC_NO_FINALIZATION
- unsigned char finalizer_nested;
- /* Placed right after flags field for */
- /* the alignment purpose. */
- unsigned short finalizer_skipped;
- /* Used by GC_check_finalizer_nested() */
- /* to minimize the level of recursion */
- /* when a client finalizer allocates */
- /* memory (initially both are 0). */
-# else
- char no_fnlz_pad[3]; /* Explicit alignment (for some rare */
+ char flags_pad[sizeof(word) - 1 /* sizeof(flags) */];
+ /* Explicit alignment (for some rare */
/* compilers such as bcc32 and wcc32). */
-# endif
-
- ptr_t stack_end; /* Cold end of the stack (except for */
- /* main thread). */
- /* On Windows: 0 means entry invalid; */
- /* not in_use implies stack_end is 0. */
-
- ptr_t stack_ptr; /* Valid only in some platform-specific states. */
-
-# ifdef GC_WIN32_THREADS
-# define ADDR_LIMIT ((ptr_t)GC_WORD_MAX)
- ptr_t last_stack_min; /* Last known minimum (hottest) address */
- /* in stack or ADDR_LIMIT if unset. */
-# ifdef I386
- ptr_t initial_stack_base; /* The cold end of the stack saved by */
- /* GC_record_stack_base (never modified */
- /* by GC_set_stackbottom). */
-# endif
-# endif
-
-# if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
- ptr_t topOfStack; /* Result of GC_FindTopOfStack(0); */
- /* valid only if the thread is blocked; */
- /* non-NULL value means already set. */
-# endif
# ifdef SIGNAL_BASED_STOP_WORLD
volatile AO_t last_stop_count;
@@ -195,28 +221,6 @@ typedef struct GC_Thread_Rep {
# endif
# endif
-# ifdef GC_WIN32_THREADS
- /* For now, alt-stack is not implemented for Win32. */
-# else
- ptr_t altstack; /* The start of the alt-stack if there */
- /* is one, NULL otherwise. */
- word altstack_size; /* The size of the alt-stack if exists. */
- ptr_t normstack; /* The start and size of the "normal" */
- /* stack (set by GC_register_altstack). */
- word normstack_size;
-# endif
-
-# if defined(E2K) || defined(IA64)
- ptr_t backing_store_end; /* Note: may reference data in GC heap. */
- ptr_t backing_store_ptr;
-# endif
-
- struct GC_traced_stack_sect_s *traced_stack_sect;
- /* Points to the "frame" data held in */
- /* stack by the innermost */
- /* GC_call_with_gc_active() of this */
- /* thread. May be NULL. */
-
# ifdef GC_PTHREADS
void *status; /* The value returned from the thread. */
/* Used only to avoid premature */
@@ -277,6 +281,8 @@ typedef struct GC_Thread_Rep {
^ NUMERIC_THREAD_ID(id)) % THREAD_TABLE_SZ)
#endif
+/* The set of all known threads. We intercept thread creation and */
+/* join/detach. Protected by the allocation lock. */
GC_EXTERN GC_thread GC_threads[THREAD_TABLE_SZ];
#ifndef MAX_MARKERS
@@ -319,7 +325,7 @@ GC_EXTERN GC_thread GC_threads[THREAD_TABLE_SZ];
# endif /* PARALLEL_MARK */
GC_INNER GC_thread GC_new_thread(thread_id_t);
- GC_INNER void GC_record_stack_base(GC_thread me,
+ GC_INNER void GC_record_stack_base(GC_stack_context_t crtn,
const struct GC_stack_base *sb);
GC_INNER GC_thread GC_register_my_thread_inner(
const struct GC_stack_base *sb,
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 993db7d9..4099ae95 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -298,7 +298,7 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context);
# define GC_lookup_thread_async GC_lookup_thread
#endif
-GC_INLINE void GC_store_stack_ptr(GC_thread me)
+GC_INLINE void GC_store_stack_ptr(GC_stack_context_t crtn)
{
/* There is no data race between the suspend handler (storing */
/* stack_ptr) and GC_push_all_stacks (fetching stack_ptr) because */
@@ -308,14 +308,15 @@ GC_INLINE void GC_store_stack_ptr(GC_thread me)
/* and fetched (by GC_push_all_stacks) using the atomic primitives to */
/* avoid the related TSan warning. */
# ifdef SPARC
- ao_store_async((volatile AO_t *)&(me -> stack_ptr),
+ ao_store_async((volatile AO_t *)&(crtn -> stack_ptr),
(AO_t)GC_save_regs_in_stack());
/* TODO: regs saving already done by GC_with_callee_saves_pushed */
# else
# ifdef IA64
- me -> backing_store_ptr = GC_save_regs_in_stack();
+ crtn -> backing_store_ptr = GC_save_regs_in_stack();
# endif
- ao_store_async((volatile AO_t *)&(me -> stack_ptr), (AO_t)GC_approx_sp());
+ ao_store_async((volatile AO_t *)&(crtn -> stack_ptr),
+ (AO_t)GC_approx_sp());
# endif
}
@@ -323,6 +324,7 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context)
{
pthread_t self;
GC_thread me;
+ GC_stack_context_t crtn;
# ifdef E2K
ptr_t bs_lo;
size_t stack_size;
@@ -363,12 +365,13 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context)
RESTORE_CANCEL(cancel_state);
return;
}
- GC_store_stack_ptr(me);
+ crtn = me -> crtn;
+ GC_store_stack_ptr(crtn);
# ifdef E2K
- GC_ASSERT(NULL == me -> backing_store_end);
+ GC_ASSERT(NULL == crtn -> backing_store_end);
GET_PROCEDURE_STACK_LOCAL(&bs_lo, &stack_size);
- me -> backing_store_end = bs_lo;
- me -> backing_store_ptr = bs_lo + stack_size;
+ crtn -> backing_store_end = bs_lo;
+ crtn -> backing_store_ptr = bs_lo + stack_size;
# endif
# ifdef GC_ENABLE_SUSPEND_THREAD
suspend_cnt = (word)ao_load_async(&(me -> ext_suspend_cnt));
@@ -405,10 +408,10 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context)
GC_log_printf("Continuing %p\n", (void *)self);
# endif
# ifdef E2K
- GC_ASSERT(me -> backing_store_end == bs_lo);
+ GC_ASSERT(crtn -> backing_store_end == bs_lo);
FREE_PROCEDURE_STACK_LOCAL(bs_lo, stack_size);
- me -> backing_store_ptr = NULL;
- me -> backing_store_end = NULL;
+ crtn -> backing_store_ptr = NULL;
+ crtn -> backing_store_end = NULL;
# endif
# ifndef GC_NETBSD_THREADS_WORKAROUND
@@ -797,10 +800,11 @@ GC_INNER void GC_push_all_stacks(void)
# if defined(E2K) || defined(IA64)
GC_bool is_self = FALSE;
# endif
+ GC_stack_context_t crtn = p -> crtn;
if (KNOWN_FINISHED(p)) continue;
++nthreads;
- traced_stack_sect = p -> traced_stack_sect;
+ traced_stack_sect = crtn -> traced_stack_sect;
if (THREAD_EQUAL(p -> id, self)) {
GC_ASSERT((p -> flags & DO_BLOCKING) == 0);
# ifdef SPARC
@@ -810,7 +814,7 @@ GC_INNER void GC_push_all_stacks(void)
# ifdef IA64
bs_hi = GC_save_regs_in_stack();
# elif defined(E2K)
- GC_ASSERT(NULL == p -> backing_store_end);
+ GC_ASSERT(NULL == crtn -> backing_store_end);
(void)GC_save_regs_in_stack();
{
size_t stack_size;
@@ -824,25 +828,25 @@ GC_INNER void GC_push_all_stacks(void)
is_self = TRUE;
# endif
} else {
- lo = (ptr_t)AO_load((volatile AO_t *)&(p -> stack_ptr));
+ lo = (ptr_t)AO_load((volatile AO_t *)&(crtn -> stack_ptr));
# ifdef IA64
- bs_hi = p -> backing_store_ptr;
+ bs_hi = crtn -> backing_store_ptr;
# elif defined(E2K)
- bs_lo = p -> backing_store_end;
- bs_hi = p -> backing_store_ptr;
+ bs_lo = crtn -> backing_store_end;
+ bs_hi = crtn -> backing_store_ptr;
# endif
if (traced_stack_sect != NULL
- && traced_stack_sect->saved_stack_ptr == lo) {
+ && traced_stack_sect -> saved_stack_ptr == lo) {
/* If the thread has never been stopped since the recent */
/* GC_call_with_gc_active invocation then skip the top */
/* "stack section" as stack_ptr already points to. */
- traced_stack_sect = traced_stack_sect->prev;
+ traced_stack_sect = traced_stack_sect -> prev;
}
}
if (EXPECT((p -> flags & MAIN_THREAD) == 0, TRUE)) {
- hi = p -> stack_end;
+ hi = crtn -> stack_end;
# ifdef IA64
- bs_lo = p -> backing_store_end;
+ bs_lo = crtn -> backing_store_end;
# endif
} else {
/* The original stack. */
@@ -855,10 +859,10 @@ GC_INNER void GC_push_all_stacks(void)
GC_log_printf("Stack for thread %p is [%p,%p)\n",
(void *)p->id, (void *)lo, (void *)hi);
# endif
- if (0 == lo) ABORT("GC_push_all_stacks: sp not set!");
- if (p->altstack != NULL && (word)p->altstack <= (word)lo
- && (word)lo <= (word)p->altstack + p->altstack_size) {
- hi = p->altstack + p->altstack_size;
+ if (NULL == lo) ABORT("GC_push_all_stacks: sp not set!");
+ if (crtn -> altstack != NULL && (word)(crtn -> altstack) <= (word)lo
+ && (word)lo <= (word)(crtn -> altstack) + crtn -> altstack_size) {
+ hi = crtn -> altstack + crtn -> altstack_size;
/* FIXME: Need to scan the normal stack too, but how ? */
/* FIXME: Assume stack grows down */
}
@@ -1085,8 +1089,8 @@ GC_INNER void GC_stop_world(void)
__asm__ __volatile__ ("push %r14"); \
__asm__ __volatile__ ("push %r15"); \
__asm__ __volatile__ ("mov %%esp, %0" \
- : "=m" (GC_nacl_gc_thread_self -> stack_ptr)); \
- BCOPY(GC_nacl_gc_thread_self -> stack_ptr, \
+ : "=m" (GC_nacl_gc_thread_self -> crtn -> stack_ptr)); \
+ BCOPY(GC_nacl_gc_thread_self -> crtn -> stack_ptr, \
GC_nacl_gc_thread_self -> reg_storage, \
NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
__asm__ __volatile__ ("naclasp $48, %r15"); \
@@ -1099,8 +1103,8 @@ GC_INNER void GC_stop_world(void)
__asm__ __volatile__ ("push %esi"); \
__asm__ __volatile__ ("push %edi"); \
__asm__ __volatile__ ("mov %%esp, %0" \
- : "=m" (GC_nacl_gc_thread_self -> stack_ptr)); \
- BCOPY(GC_nacl_gc_thread_self -> stack_ptr, \
+ : "=m" (GC_nacl_gc_thread_self -> crtn -> stack_ptr)); \
+ BCOPY(GC_nacl_gc_thread_self -> crtn -> stack_ptr, \
GC_nacl_gc_thread_self -> reg_storage, \
NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
__asm__ __volatile__ ("add $16, %esp"); \
@@ -1110,10 +1114,10 @@ GC_INNER void GC_stop_world(void)
do { \
__asm__ __volatile__ ("push {r4-r8,r10-r12,lr}"); \
__asm__ __volatile__ ("mov r0, %0" \
- : : "r" (&GC_nacl_gc_thread_self -> stack_ptr)); \
+ : : "r" (&GC_nacl_gc_thread_self -> crtn -> stack_ptr)); \
__asm__ __volatile__ ("bic r0, r0, #0xc0000000"); \
__asm__ __volatile__ ("str sp, [r0]"); \
- BCOPY(GC_nacl_gc_thread_self -> stack_ptr, \
+ BCOPY(GC_nacl_gc_thread_self -> crtn -> stack_ptr, \
GC_nacl_gc_thread_self -> reg_storage, \
NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
__asm__ __volatile__ ("add sp, sp, #40"); \
@@ -1127,7 +1131,7 @@ GC_INNER void GC_stop_world(void)
{
if (GC_nacl_thread_idx != -1) {
NACL_STORE_REGS();
- GC_nacl_gc_thread_self -> stack_ptr = GC_approx_sp();
+ GC_nacl_gc_thread_self -> crtn -> stack_ptr = GC_approx_sp();
GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
}
}
@@ -1150,7 +1154,7 @@ GC_INNER void GC_stop_world(void)
/* so don't bother storing registers again, the GC has a set. */
if (!GC_nacl_thread_parked[GC_nacl_thread_idx]) {
NACL_STORE_REGS();
- GC_nacl_gc_thread_self -> stack_ptr = GC_approx_sp();
+ GC_nacl_gc_thread_self -> crtn -> stack_ptr = GC_approx_sp();
}
GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
while (GC_nacl_park_threads_now) {
diff --git a/pthread_support.c b/pthread_support.c
index b9fc4491..08d52375 100644
--- a/pthread_support.c
+++ b/pthread_support.c
@@ -604,10 +604,10 @@
GC_INNER GC_thread GC_threads[THREAD_TABLE_SZ] = {0};
/* It may not be safe to allocate when we register the first thread. */
-/* As "next" and "status" fields are unused, no need to push this */
-/* (but "backing_store_end" field should be pushed on E2K). */
+/* Note that next and status fields are unused, but there might be some */
+/* other fields (crtn and backing_store_end) to be pushed. */
+static struct GC_StackContext_Rep first_crtn;
static struct GC_Thread_Rep first_thread;
-static GC_bool first_thread_used = FALSE;
#ifdef GC_ASSERTIONS
GC_INNER GC_bool GC_thr_initialized = FALSE;
@@ -628,8 +628,9 @@ void GC_push_thread_structures(void)
/* else */ {
GC_PUSH_ALL_SYM(GC_threads);
# ifdef E2K
- GC_PUSH_ALL_SYM(first_thread.backing_store_end);
+ GC_PUSH_ALL_SYM(first_crtn.backing_store_end);
# endif
+ GC_PUSH_ALL_SYM(first_thread.crtn);
}
# if defined(THREAD_LOCAL_ALLOC) && defined(USE_CUSTOM_SPECIFIC)
GC_PUSH_ALL_SYM(GC_thread_key);
@@ -639,9 +640,17 @@ void GC_push_thread_structures(void)
#if defined(MPROTECT_VDB) && defined(GC_WIN32_THREADS)
GC_INNER void GC_win32_unprotect_thread(GC_thread t)
{
- if (!GC_win32_dll_threads && GC_auto_incremental && t != &first_thread) {
- GC_ASSERT(SMALL_OBJ(GC_size(t)));
- GC_remove_protection(HBLKPTR(t), 1, FALSE);
+ if (!GC_win32_dll_threads && GC_auto_incremental) {
+ GC_stack_context_t crtn = t -> crtn;
+
+ if (crtn != &first_crtn) {
+ GC_ASSERT(SMALL_OBJ(GC_size(crtn)));
+ GC_remove_protection(HBLKPTR(crtn), 1, FALSE);
+ }
+ if (t != &first_thread) {
+ GC_ASSERT(SMALL_OBJ(GC_size(t)));
+ GC_remove_protection(HBLKPTR(t), 1, FALSE);
+ }
}
}
#endif /* MPROTECT_VDB && GC_WIN32_THREADS */
@@ -685,23 +694,33 @@ GC_INNER_WIN32THREAD GC_thread GC_new_thread(thread_id_t id)
break;
}
# endif
- if (EXPECT(!first_thread_used, FALSE)) {
+ if (EXPECT(NULL == first_thread.crtn, FALSE)) {
result = &first_thread;
- first_thread_used = TRUE;
+ first_thread.crtn = &first_crtn;
GC_ASSERT(NULL == GC_threads[hv]);
# ifdef CPPCHECK
-# ifdef THREAD_SANITIZER
- GC_noop1((unsigned char)(result -> dummy[0]));
+ GC_noop1((unsigned char)first_thread.flags_pad[0]);
+# if defined(THREAD_SANITIZER) && defined(SIGNAL_BASED_STOP_WORLD)
+ GC_noop1((unsigned char)first_crtn.dummy[0]);
# endif
-# ifdef GC_NO_FINALIZATION
- GC_noop1((unsigned char)(result -> no_fnlz_pad[0]));
+# ifndef GC_NO_FINALIZATION
+ GC_noop1((unsigned char)first_crtn.fnlz_pad[0]);
# endif
# endif
} else {
+ GC_stack_context_t crtn;
+
GC_ASSERT(!GC_win32_dll_threads);
- result = (struct GC_Thread_Rep *)
- GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
- if (EXPECT(NULL == result, FALSE)) return NULL;
+ crtn = (GC_stack_context_t)GC_INTERNAL_MALLOC(
+ sizeof(struct GC_StackContext_Rep), NORMAL);
+ if (EXPECT(NULL == crtn, FALSE)) return NULL;
+ result = (GC_thread)GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep),
+ NORMAL);
+ if (EXPECT(NULL == result, FALSE)) {
+ GC_INTERNAL_FREE(crtn);
+ return NULL;
+ }
+ result -> crtn = crtn;
}
/* The id field is not set here. */
# ifdef USE_TKILL_ON_ANDROID
@@ -771,6 +790,8 @@ GC_INNER_WIN32THREAD void GC_delete_thread(thread_id_t id)
# ifdef GC_DARWIN_THREADS
mach_port_deallocate(mach_task_self(), p -> mach_thread);
# endif
+ GC_ASSERT(p -> crtn != &first_crtn);
+ GC_INTERNAL_FREE(p -> crtn);
GC_INTERNAL_FREE(p);
}
}
@@ -802,7 +823,7 @@ GC_INNER_WIN32THREAD void GC_delete_thread(thread_id_t id)
/* In this branch asynchronous changes to (*t) are possible. */
/* It's not allowed to call GC_printf (and the friends) here, */
/* see GC_stop_world() in win32_threads.c for the information. */
- t -> stack_end = NULL;
+ t -> crtn -> stack_end = NULL;
t -> id = 0;
t -> flags = 0; /* !IS_SUSPENDED */
# ifdef RETRY_GET_THREAD_CONTEXT
@@ -864,7 +885,7 @@ GC_INNER GC_thread GC_lookup_thread(thread_id_t id)
GC_ASSERT(I_HOLD_LOCK());
me = GC_lookup_thread(thread_id_self());
- me -> finalizer_nested = 0;
+ me -> crtn -> finalizer_nested = 0;
}
/* Checks and updates the thread-local level of finalizers recursion. */
@@ -874,21 +895,22 @@ GC_INNER GC_thread GC_lookup_thread(thread_id_t id)
/* Called by GC_notify_or_invoke_finalizers() only. */
GC_INNER unsigned char *GC_check_finalizer_nested(void)
{
- GC_thread me;
+ GC_stack_context_t crtn;
unsigned nesting_level;
GC_ASSERT(I_HOLD_LOCK());
- me = GC_lookup_thread(thread_id_self());
- nesting_level = me -> finalizer_nested;
+ crtn = GC_lookup_thread(thread_id_self()) -> crtn;
+ nesting_level = crtn -> finalizer_nested;
if (nesting_level) {
/* We are inside another GC_invoke_finalizers(). */
/* Skip some implicitly-called GC_invoke_finalizers() */
/* depending on the nesting (recursion) level. */
- if (++me->finalizer_skipped < (1U << nesting_level)) return NULL;
- me -> finalizer_skipped = 0;
+ if (++(crtn -> finalizer_skipped) < (1U << nesting_level))
+ return NULL;
+ crtn -> finalizer_skipped = 0;
}
- me -> finalizer_nested = (unsigned char)(nesting_level + 1);
- return &(me -> finalizer_nested);
+ crtn -> finalizer_nested = (unsigned char)(nesting_level + 1);
+ return &(crtn -> finalizer_nested);
}
#endif /* !GC_NO_FINALIZATION */
@@ -930,6 +952,7 @@ GC_API void GC_CALL GC_register_altstack(void *normstack,
UNUSED_ARG(altstack_size);
#else
GC_thread me;
+ GC_stack_context_t crtn;
thread_id_t self_id = thread_id_self();
DCL_LOCK_STATE;
@@ -939,10 +962,11 @@ GC_API void GC_CALL GC_register_altstack(void *normstack,
/* We are called before GC_thr_init. */
me = &first_thread;
}
- me -> normstack = (ptr_t)normstack;
- me -> normstack_size = normstack_size;
- me -> altstack = (ptr_t)altstack;
- me -> altstack_size = altstack_size;
+ crtn = me -> crtn;
+ crtn -> normstack = (ptr_t)normstack;
+ crtn -> normstack_size = normstack_size;
+ crtn -> altstack = (ptr_t)altstack;
+ crtn -> altstack_size = altstack_size;
UNLOCK();
#endif
}
@@ -968,14 +992,16 @@ GC_API void GC_CALL GC_register_altstack(void *normstack,
# endif
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
- if (p -> stack_end != NULL) {
+ GC_stack_context_t crtn = p -> crtn;
+
+ if (crtn -> stack_end != NULL) {
# ifdef STACK_GROWS_UP
- if ((word)p->stack_end >= (word)lo
- && (word)p->stack_end < (word)hi)
+ if ((word)crtn -> stack_end >= (word)lo
+ && (word)crtn -> stack_end < (word)hi)
return TRUE;
# else /* STACK_GROWS_DOWN */
- if ((word)p->stack_end > (word)lo
- && (word)p->stack_end <= (word)hi)
+ if ((word)crtn -> stack_end > (word)lo
+ && (word)crtn -> stack_end <= (word)hi)
return TRUE;
# endif
}
@@ -1006,9 +1032,11 @@ GC_API void GC_CALL GC_register_altstack(void *normstack,
# endif
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
- if ((word)(p -> stack_end) > (word)result
- && (word)(p -> stack_end) < (word)bound) {
- result = p -> stack_end;
+ GC_stack_context_t crtn = p -> crtn;
+
+ if ((word)(crtn -> stack_end) > (word)result
+ && (word)(crtn -> stack_end) < (word)bound) {
+ result = crtn -> stack_end;
}
}
}
@@ -1287,7 +1315,11 @@ GC_INNER void GC_wait_for_gc_completion(GC_bool wait_for_all)
/* TODO: To avoid TSan hang (when updating GC_bytes_freed), */
/* we just skip explicit freeing of GC_threads entries. */
# if !defined(THREAD_SANITIZER) || !defined(CAN_CALL_ATFORK)
- if (p != &first_thread) GC_INTERNAL_FREE(p);
+ if (p != &first_thread) {
+ GC_ASSERT(p -> crtn != &first_crtn);
+ GC_INTERNAL_FREE(p -> crtn);
+ GC_INTERNAL_FREE(p);
+ }
# endif
}
}
@@ -1514,19 +1546,18 @@ GC_INNER void GC_wait_for_gc_completion(GC_bool wait_for_all)
GC_INNER GC_bool GC_in_thread_creation = FALSE;
/* Protected by allocation lock. */
-GC_INNER_WIN32THREAD void GC_record_stack_base(GC_thread me,
+GC_INNER_WIN32THREAD void GC_record_stack_base(GC_stack_context_t crtn,
const struct GC_stack_base *sb)
{
# if !defined(GC_DARWIN_THREADS) && !defined(GC_WIN32_THREADS)
- me -> stack_ptr = (ptr_t)sb->mem_base;
+ crtn -> stack_ptr = (ptr_t)sb->mem_base;
# endif
- me -> stack_end = (ptr_t)sb->mem_base;
- if (NULL == me -> stack_end)
+ if ((crtn -> stack_end = (ptr_t)sb->mem_base) == NULL)
ABORT("Bad stack base in GC_register_my_thread");
# ifdef IA64
- me -> backing_store_end = (ptr_t)sb->reg_base;
+ crtn -> backing_store_end = (ptr_t)sb->reg_base;
# elif defined(I386) && defined(GC_WIN32_THREADS)
- me -> initial_stack_base = (ptr_t)sb->mem_base;
+ crtn -> initial_stack_base = (ptr_t)sb->mem_base;
# endif
}
@@ -1547,7 +1578,7 @@ STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
me -> mach_thread = mach_thread_self();
# endif
GC_in_thread_creation = FALSE;
- GC_record_stack_base(me, sb);
+ GC_record_stack_base(me -> crtn, sb);
return me;
}
@@ -1769,29 +1800,31 @@ static GC_bool do_blocking_enter(GC_thread me)
# elif defined(E2K)
size_t stack_size;
# endif
+ GC_stack_context_t crtn = me -> crtn;
GC_bool topOfStackUnset = FALSE;
GC_ASSERT(I_HOLD_LOCK());
GC_ASSERT((me -> flags & DO_BLOCKING) == 0);
# ifdef SPARC
- me -> stack_ptr = bs_hi;
+ crtn -> stack_ptr = bs_hi;
# else
- me -> stack_ptr = GC_approx_sp();
+ crtn -> stack_ptr = GC_approx_sp();
# endif
# if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
- if (me -> topOfStack == NULL) {
+ if (NULL == crtn -> topOfStack) {
/* GC_do_blocking_inner is not called recursively, */
/* so topOfStack should be computed now. */
topOfStackUnset = TRUE;
- me -> topOfStack = GC_FindTopOfStack(0);
+ crtn -> topOfStack = GC_FindTopOfStack(0);
}
# endif
# ifdef IA64
- me -> backing_store_ptr = bs_hi;
+ crtn -> backing_store_ptr = bs_hi;
# elif defined(E2K)
- GC_ASSERT(NULL == me -> backing_store_end);
- stack_size = GC_alloc_and_get_procedure_stack(&me->backing_store_end);
- me->backing_store_ptr = me->backing_store_end + stack_size;
+ GC_ASSERT(NULL == crtn -> backing_store_end);
+ stack_size = GC_alloc_and_get_procedure_stack(
+ &(crtn -> backing_store_end));
+ crtn -> backing_store_ptr = crtn -> backing_store_end + stack_size;
# endif
me -> flags |= DO_BLOCKING;
/* Save context here if we want to support precise stack marking. */
@@ -1803,16 +1836,20 @@ static void do_blocking_leave(GC_thread me, GC_bool topOfStackUnset)
GC_ASSERT(I_HOLD_LOCK());
me -> flags &= ~DO_BLOCKING;
# ifdef E2K
- GC_ASSERT(me -> backing_store_end != NULL);
- /* Note that me->backing_store_end value here may differ from */
- /* the one stored in this function previously. */
- GC_INTERNAL_FREE(me -> backing_store_end);
- me -> backing_store_ptr = NULL;
- me -> backing_store_end = NULL;
+ {
+ GC_stack_context_t crtn = me -> crtn;
+
+ GC_ASSERT(crtn -> backing_store_end != NULL);
+ /* Note that value of backing_store_end here may differ from */
+ /* the one stored in this function previously. */
+ GC_INTERNAL_FREE(crtn -> backing_store_end);
+ crtn -> backing_store_ptr = NULL;
+ crtn -> backing_store_end = NULL;
+ }
# endif
# if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
if (topOfStackUnset)
- me -> topOfStack = NULL; /* make topOfStack unset again */
+ me -> crtn -> topOfStack = NULL; /* make it unset again */
# else
(void)topOfStackUnset;
# endif
@@ -1898,23 +1935,27 @@ GC_API void GC_CALL GC_set_stackbottom(void *gc_thread_handle,
if (!EXPECT(GC_is_initialized, TRUE)) {
GC_ASSERT(NULL == t);
} else {
+ GC_stack_context_t crtn;
+
GC_ASSERT(I_HOLD_LOCK());
if (NULL == t) /* current thread? */
t = GC_lookup_thread(thread_id_self());
GC_ASSERT(!KNOWN_FINISHED(t));
+ crtn = t -> crtn;
GC_ASSERT((t -> flags & DO_BLOCKING) == 0
- && NULL == t -> traced_stack_sect); /* for now */
+ && NULL == crtn -> traced_stack_sect); /* for now */
# ifndef GC_WIN32_THREADS
if (EXPECT((t -> flags & MAIN_THREAD) == 0, TRUE))
# endif
{
- t -> stack_end = (ptr_t)sb->mem_base;
+ crtn -> stack_end = (ptr_t)sb->mem_base;
# ifdef IA64
- t -> backing_store_end = (ptr_t)sb->reg_base;
+ crtn -> backing_store_end = (ptr_t)sb->reg_base;
# endif
# ifdef GC_WIN32_THREADS
- t -> last_stack_min = ADDR_LIMIT; /* reset the known minimum */
+ /* Reset the known minimum (hottest address in the stack). */
+ crtn -> last_stack_min = ADDR_LIMIT;
# endif
return;
}
@@ -1945,13 +1986,15 @@ GC_API void * GC_CALL GC_get_my_stackbottom(struct GC_stack_base *sb)
} else
# endif
/* else */ {
- sb -> mem_base = me -> stack_end;
-# ifdef IA64
- sb -> reg_base = me -> backing_store_end;
-# endif
+ GC_stack_context_t crtn = me -> crtn;
+
+ sb -> mem_base = crtn -> stack_end;
+# ifdef IA64
+ sb -> reg_base = crtn -> backing_store_end;
+# endif
}
# ifdef E2K
- sb -> reg_base = NULL;
+ sb -> reg_base = NULL;
# endif
UNLOCK();
return (void *)me; /* gc_thread_handle */
@@ -1967,6 +2010,7 @@ GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
struct GC_traced_stack_sect_s stacksect;
thread_id_t self_id = thread_id_self();
GC_thread me;
+ GC_stack_context_t crtn;
# ifdef E2K
size_t stack_size;
# endif
@@ -1974,6 +2018,7 @@ GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
LOCK(); /* This will block if the world is stopped. */
me = GC_lookup_thread(self_id);
+ crtn = me -> crtn;
/* Adjust our stack bottom value (this could happen unless */
/* GC_get_stack_base() was used which returned GC_SUCCESS). */
@@ -1985,11 +2030,13 @@ GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
} else
# endif
/* else */ {
- GC_ASSERT(me -> stack_end != NULL);
- if ((word)(me -> stack_end) HOTTER_THAN (word)(&stacksect)) {
- me -> stack_end = (ptr_t)(&stacksect);
+ ptr_t stack_end = crtn -> stack_end; /* read of a volatile field */
+
+ GC_ASSERT(stack_end != NULL);
+ if ((word)stack_end HOTTER_THAN (word)(&stacksect)) {
+ crtn -> stack_end = (ptr_t)(&stacksect);
# if defined(I386) && defined(GC_WIN32_THREADS)
- me -> initial_stack_base = me -> stack_end;
+ crtn -> initial_stack_base = (ptr_t)(&stacksect);
# endif
}
}
@@ -2009,50 +2056,53 @@ GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
UNLOCK();
GC_suspend_self_inner(me, suspend_cnt);
LOCK();
+ GC_ASSERT(me -> crtn == crtn);
}
# endif
/* Setup new "stack section". */
- stacksect.saved_stack_ptr = me -> stack_ptr;
+ stacksect.saved_stack_ptr = crtn -> stack_ptr;
# ifdef IA64
/* This is the same as in GC_call_with_stack_base(). */
stacksect.backing_store_end = GC_save_regs_in_stack();
/* Unnecessarily flushes register stack, */
/* but that probably doesn't hurt. */
- stacksect.saved_backing_store_ptr = me -> backing_store_ptr;
+ stacksect.saved_backing_store_ptr = crtn -> backing_store_ptr;
# elif defined(E2K)
- GC_ASSERT(me -> backing_store_end != NULL);
- GC_INTERNAL_FREE(me -> backing_store_end);
- me -> backing_store_ptr = NULL;
- me -> backing_store_end = NULL;
+ GC_ASSERT(crtn -> backing_store_end != NULL);
+ GC_INTERNAL_FREE(crtn -> backing_store_end);
+ crtn -> backing_store_ptr = NULL;
+ crtn -> backing_store_end = NULL;
# endif
- stacksect.prev = me -> traced_stack_sect;
+ stacksect.prev = crtn -> traced_stack_sect;
me -> flags &= ~DO_BLOCKING;
- me -> traced_stack_sect = &stacksect;
+ crtn -> traced_stack_sect = &stacksect;
UNLOCK();
client_data = fn(client_data);
GC_ASSERT((me -> flags & DO_BLOCKING) == 0);
- GC_ASSERT(me -> traced_stack_sect == &stacksect);
/* Restore original "stack section". */
-# ifdef CPPCHECK
- GC_noop1((word)me->traced_stack_sect);
-# endif
# ifdef E2K
(void)GC_save_regs_in_stack();
# endif
LOCK();
- me -> traced_stack_sect = stacksect.prev;
+ GC_ASSERT(me -> crtn == crtn);
+ GC_ASSERT(crtn -> traced_stack_sect == &stacksect);
+# ifdef CPPCHECK
+ GC_noop1((word)(crtn -> traced_stack_sect));
+# endif
+ crtn -> traced_stack_sect = stacksect.prev;
# ifdef IA64
- me -> backing_store_ptr = stacksect.saved_backing_store_ptr;
+ crtn -> backing_store_ptr = stacksect.saved_backing_store_ptr;
# elif defined(E2K)
- GC_ASSERT(NULL == me -> backing_store_end);
- stack_size = GC_alloc_and_get_procedure_stack(&me->backing_store_end);
- me->backing_store_ptr = me->backing_store_end + stack_size;
+ GC_ASSERT(NULL == crtn -> backing_store_end);
+ stack_size = GC_alloc_and_get_procedure_stack(
+ &(crtn -> backing_store_end));
+ crtn -> backing_store_ptr = crtn -> backing_store_end + stack_size;
# endif
me -> flags |= DO_BLOCKING;
- me -> stack_ptr = stacksect.saved_stack_ptr;
+ crtn -> stack_ptr = stacksect.saved_stack_ptr;
UNLOCK();
return client_data; /* result */
@@ -2237,7 +2287,7 @@ GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
/* with MACH_SEND_INVALID_DEST error. */
me -> mach_thread = mach_thread_self();
# endif
- GC_record_stack_base(me, sb);
+ GC_record_stack_base(me -> crtn, sb);
me -> flags &= ~FINISHED; /* but not DETACHED */
} else
# endif
@@ -2348,6 +2398,7 @@ GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
/* client thread key destructor). */
if (KNOWN_FINISHED(t)) {
GC_delete_gc_thread_no_free(t);
+ GC_INTERNAL_FREE(t -> crtn);
GC_INTERNAL_FREE(t);
}
UNLOCK();
@@ -2387,6 +2438,7 @@ GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
/* Here the pthread id may have been recycled. */
if (KNOWN_FINISHED(t)) {
GC_delete_gc_thread_no_free(t);
+ GC_INTERNAL_FREE(t -> crtn);
GC_INTERNAL_FREE(t);
}
UNLOCK();
diff --git a/win32_threads.c b/win32_threads.c
index c60a2592..c5eb9a24 100644
--- a/win32_threads.c
+++ b/win32_threads.c
@@ -158,7 +158,10 @@ GC_API void GC_CALL GC_use_threads_discovery(void)
/* Things may get quite slow for large numbers of threads, */
/* since we look them up with sequential search. */
-volatile struct GC_Thread_Rep dll_thread_table[MAX_THREADS];
+static volatile struct GC_Thread_Rep dll_thread_table[MAX_THREADS];
+#ifndef GC_NO_THREADS_DISCOVERY
+ static struct GC_StackContext_Rep dll_crtn_table[MAX_THREADS];
+#endif
STATIC volatile LONG GC_max_thread_index = 0;
/* Largest index in dll_thread_table */
@@ -216,9 +219,9 @@ GC_INNER GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
}
/* Update GC_max_thread_index if necessary. The following is */
/* safe, and unlike CompareExchange-based solutions seems to work */
- /* on all Windows95 and later platforms. */
- /* Unfortunately, GC_max_thread_index may be temporarily out of */
- /* bounds, so readers have to compensate. */
+ /* on all Windows 95 and later platforms. Unfortunately, */
+ /* GC_max_thread_index may be temporarily out of bounds, so */
+ /* readers have to compensate. */
while (i > GC_max_thread_index) {
InterlockedIncrement((LONG *)&GC_max_thread_index);
/* Cast away volatile for older versions of Win32 headers. */
@@ -229,6 +232,7 @@ GC_INNER GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
GC_max_thread_index = MAX_THREADS - 1;
}
me = (GC_thread)(dll_thread_table + i);
+ me -> crtn = &dll_crtn_table[i];
} else
# endif
/* else */ /* Not using DllMain */ {
@@ -253,8 +257,8 @@ GC_INNER GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
": errcode= 0x%X", (unsigned)GetLastError());
}
# endif
- me -> last_stack_min = ADDR_LIMIT;
- GC_record_stack_base(me, sb);
+ me -> crtn -> last_stack_min = ADDR_LIMIT;
+ GC_record_stack_base(me -> crtn, sb);
/* Up until this point, GC_push_all_stacks considers this thread */
/* invalid. */
/* Up until this point, this entry is viewed as reserved but invalid */
@@ -421,7 +425,7 @@ STATIC void GC_suspend(GC_thread t)
&& exitCode != STILL_ACTIVE) {
GC_release_dirty_lock();
# ifdef GC_PTHREADS
- t -> stack_end = NULL; /* prevent stack from being pushed */
+ t -> crtn -> stack_end = NULL; /* prevent stack from being pushed */
# else
/* This breaks pthread_join on Cygwin, which is guaranteed to */
/* only see user threads. */
@@ -459,7 +463,7 @@ STATIC void GC_suspend(GC_thread t)
&& exitCode != STILL_ACTIVE) {
GC_release_dirty_lock();
# ifdef GC_PTHREADS
- t -> stack_end = NULL; /* prevent stack from being pushed */
+ t -> crtn -> stack_end = NULL; /* prevent stack from being pushed */
# else
GC_ASSERT(GC_win32_dll_threads);
GC_delete_gc_thread_no_free(t);
@@ -523,7 +527,7 @@ GC_INNER void GC_stop_world(void)
for (i = 0; i <= my_max; i++) {
GC_thread p = (GC_thread)(dll_thread_table + i);
- if (p -> stack_end != NULL && (p -> flags & DO_BLOCKING) == 0
+ if (p -> crtn -> stack_end != NULL && (p -> flags & DO_BLOCKING) == 0
&& p -> id != self_id) {
GC_suspend(p);
}
@@ -536,7 +540,7 @@ GC_INNER void GC_stop_world(void)
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (p = GC_threads[i]; p != NULL; p = p -> tm.next)
- if (p -> stack_end != NULL && p -> id != self_id
+ if (p -> crtn -> stack_end != NULL && p -> id != self_id
&& (p -> flags & (FINISHED | DO_BLOCKING)) == 0)
GC_suspend(p);
}
@@ -571,7 +575,7 @@ GC_INNER void GC_start_world(void)
# ifdef DEBUG_THREADS
GC_log_printf("Resuming 0x%x\n", (int)p->id);
# endif
- GC_ASSERT(p -> stack_end != NULL && p -> id != self_id);
+ GC_ASSERT(p -> crtn -> stack_end != NULL && p -> id != self_id);
if (ResumeThread(THREAD_HANDLE(p)) == (DWORD)-1)
ABORT("ResumeThread failed");
p -> flags &= ~IS_SUSPENDED;
@@ -590,7 +594,7 @@ GC_INNER void GC_start_world(void)
# ifdef DEBUG_THREADS
GC_log_printf("Resuming 0x%x\n", (int)p->id);
# endif
- GC_ASSERT(p -> stack_end != NULL && p -> id != self_id);
+ GC_ASSERT(p -> crtn -> stack_end != NULL && p -> id != self_id);
if (ResumeThread(THREAD_HANDLE(p)) == (DWORD)-1)
ABORT("ResumeThread failed");
GC_win32_unprotect_thread(p);
@@ -734,8 +738,12 @@ STATIC word GC_push_stack_for(GC_thread thread, thread_id_t self_id,
{
GC_bool is_self = FALSE;
ptr_t sp, stack_min;
- struct GC_traced_stack_sect_s *traced_stack_sect =
- thread -> traced_stack_sect;
+ GC_stack_context_t crtn = thread -> crtn;
+ ptr_t stack_end = crtn -> stack_end;
+ struct GC_traced_stack_sect_s *traced_stack_sect = crtn -> traced_stack_sect;
+
+ if (EXPECT(NULL == stack_end, FALSE)) return 0;
+
if (thread -> id == self_id) {
GC_ASSERT((thread -> flags & DO_BLOCKING) == 0);
sp = GC_approx_sp();
@@ -743,7 +751,7 @@ STATIC word GC_push_stack_for(GC_thread thread, thread_id_t self_id,
*pfound_me = TRUE;
} else if ((thread -> flags & DO_BLOCKING) != 0) {
/* Use saved sp value for blocked threads. */
- sp = thread -> stack_ptr;
+ sp = crtn -> stack_ptr;
} else {
# ifdef RETRY_GET_THREAD_CONTEXT
/* We cache context when suspending the thread since it may */
@@ -767,7 +775,7 @@ STATIC word GC_push_stack_for(GC_thread thread, thread_id_t self_id,
} else {
# ifdef RETRY_GET_THREAD_CONTEXT
/* At least, try to use the stale context if saved. */
- sp = thread->context_sp;
+ sp = thread -> context_sp;
if (NULL == sp) {
/* Skip the current thread, anyway its stack will */
/* be pushed when the world is stopped. */
@@ -810,12 +818,11 @@ STATIC word GC_push_stack_for(GC_thread thread, thread_id_t self_id,
GC_log_printf("TIB stack limit/base: %p .. %p\n",
(void *)tib->StackLimit, (void *)tib->StackBase);
# endif
- GC_ASSERT(!((word)(thread -> stack_end)
- COOLER_THAN (word)tib->StackBase));
- if (thread -> stack_end != thread -> initial_stack_base
- /* We are in a coroutine. */
- && ((word)(thread -> stack_end) <= (word)tib->StackLimit
- || (word)tib->StackBase < (word)(thread -> stack_end))) {
+ GC_ASSERT(!((word)stack_end COOLER_THAN (word)tib->StackBase));
+ if (stack_end != crtn -> initial_stack_base
+ /* We are in a coroutine (the old style of coroutine support). */
+ && ((word)stack_end <= (word)tib->StackLimit
+ || (word)tib->StackBase < (word)stack_end)) {
/* The coroutine stack is not within TIB stack. */
WARN("GetThreadContext might return stale register values"
" including ESP= %p\n", sp);
@@ -850,80 +857,78 @@ STATIC word GC_push_stack_for(GC_thread thread, thread_id_t self_id,
/* or to an address in the thread stack no larger than sp, */
/* taking advantage of the old value to avoid slow traversals */
/* of large stacks. */
- if (thread -> last_stack_min == ADDR_LIMIT) {
+ if (crtn -> last_stack_min == ADDR_LIMIT) {
# ifdef MSWINCE
if (GC_dont_query_stack_min) {
stack_min = GC_wince_evaluate_stack_min(traced_stack_sect != NULL ?
- (ptr_t)traced_stack_sect : thread -> stack_end);
+ (ptr_t)traced_stack_sect : stack_end);
/* Keep last_stack_min value unmodified. */
} else
# endif
/* else */ {
stack_min = GC_get_stack_min(traced_stack_sect != NULL ?
- (ptr_t)traced_stack_sect : thread -> stack_end);
+ (ptr_t)traced_stack_sect : stack_end);
GC_win32_unprotect_thread(thread);
- thread -> last_stack_min = stack_min;
+ crtn -> last_stack_min = stack_min;
}
} else {
/* First, adjust the latest known minimum stack address if we */
/* are inside GC_call_with_gc_active(). */
if (traced_stack_sect != NULL &&
- (word)thread->last_stack_min > (word)traced_stack_sect) {
+ (word)(crtn -> last_stack_min) > (word)traced_stack_sect) {
GC_win32_unprotect_thread(thread);
- thread -> last_stack_min = (ptr_t)traced_stack_sect;
+ crtn -> last_stack_min = (ptr_t)traced_stack_sect;
}
- if ((word)sp < (word)(thread -> stack_end)
- && (word)sp >= (word)thread->last_stack_min) {
+ if ((word)sp < (word)stack_end
+ && (word)sp >= (word)(crtn -> last_stack_min)) {
stack_min = sp;
} else {
/* In the current thread it is always safe to use sp value. */
- if (may_be_in_stack(is_self && (word)sp < (word)thread->last_stack_min ?
- sp : thread -> last_stack_min)) {
+ if (may_be_in_stack(is_self && (word)sp < (word)(crtn -> last_stack_min)
+ ? sp : crtn -> last_stack_min)) {
stack_min = (ptr_t)last_info.BaseAddress;
/* Do not probe rest of the stack if sp is correct. */
- if ((word)sp < (word)stack_min
- || (word)sp >= (word)(thread -> stack_end))
- stack_min = GC_get_stack_min(thread -> last_stack_min);
+ if ((word)sp < (word)stack_min || (word)sp >= (word)stack_end)
+ stack_min = GC_get_stack_min(crtn -> last_stack_min);
} else {
/* Stack shrunk? Is this possible? */
- stack_min = GC_get_stack_min(thread -> stack_end);
+ stack_min = GC_get_stack_min(stack_end);
}
GC_win32_unprotect_thread(thread);
- thread -> last_stack_min = stack_min;
+ crtn -> last_stack_min = stack_min;
}
}
GC_ASSERT(GC_dont_query_stack_min
- || stack_min == GC_get_stack_min(thread -> stack_end)
+ || stack_min == GC_get_stack_min(stack_end)
|| ((word)sp >= (word)stack_min
- && (word)stack_min < (word)(thread -> stack_end)
- && (word)stack_min
- > (word)GC_get_stack_min(thread -> stack_end)));
+ && (word)stack_min < (word)stack_end
+ && (word)stack_min > (word)GC_get_stack_min(stack_end)));
- if ((word)sp >= (word)stack_min && (word)sp < (word)(thread -> stack_end)) {
+ if ((word)sp >= (word)stack_min && (word)sp < (word)stack_end) {
# ifdef DEBUG_THREADS
GC_log_printf("Pushing stack for 0x%x from sp %p to %p from 0x%x\n",
- (int)thread->id, (void *)sp,
- (void *)(thread -> stack_end), (int)self_id);
+ (int)(thread -> id), (void *)sp, (void *)stack_end,
+ (int)self_id);
# endif
- GC_push_all_stack_sections(sp, thread -> stack_end, traced_stack_sect);
+ GC_push_all_stack_sections(sp, stack_end, traced_stack_sect);
} else {
/* If not current thread then it is possible for sp to point to */
/* the guarded (untouched yet) page just below the current */
/* stack_min of the thread. */
- if (is_self || (word)sp >= (word)(thread -> stack_end)
+ if (is_self || (word)sp >= (word)stack_end
|| (word)(sp + GC_page_size) < (word)stack_min)
WARN("Thread stack pointer %p out of range, pushing everything\n", sp);
# ifdef DEBUG_THREADS
GC_log_printf("Pushing stack for 0x%x from (min) %p to %p from 0x%x\n",
- (int)thread->id, (void *)stack_min,
- (void *)(thread -> stack_end), (int)self_id);
+ (int)(thread -> id), (void *)stack_min, (void *)stack_end,
+ (int)self_id);
# endif
/* Push everything - ignore "traced stack section" data. */
- GC_push_all_stack(stack_min, thread -> stack_end);
+ GC_push_all_stack(stack_min, stack_end);
}
- return thread -> stack_end - sp; /* stack grows down */
+ return stack_end - sp; /* stack grows down */
}
/* Should do exactly the right thing if the world is stopped; should */
@@ -947,7 +952,7 @@ GC_INNER void GC_push_all_stacks(void)
for (i = 0; i <= my_max; i++) {
GC_thread p = (GC_thread)(dll_thread_table + i);
- if (p -> tm.in_use && p -> stack_end != NULL) {
+ if (p -> tm.in_use) {
# ifndef SMALL_CONFIG
++nthreads;
# endif
@@ -962,7 +967,7 @@ GC_INNER void GC_push_all_stacks(void)
GC_thread p;
for (p = GC_threads[i]; p != NULL; p = p -> tm.next)
- if (!KNOWN_FINISHED(p) && p -> stack_end != NULL) {
+ if (!KNOWN_FINISHED(p)) {
# ifndef SMALL_CONFIG
++nthreads;
# endif
@@ -1010,13 +1015,14 @@ GC_INNER void GC_get_next_stack(char *start, char *limit,
LONG my_max = GC_get_max_thread_index();
for (i = 0; i <= my_max; i++) {
- ptr_t s = (ptr_t)dll_thread_table[i].stack_end;
+ ptr_t stack_end = (ptr_t)dll_thread_table[i].crtn -> stack_end;
- if ((word)s > (word)start && (word)s < (word)current_min) {
+ if ((word)stack_end > (word)start
+ && (word)stack_end < (word)current_min) {
/* Update address of last_stack_min. */
plast_stack_min = (ptr_t * /* no volatile */)
- &dll_thread_table[i].last_stack_min;
- current_min = s;
+ &(dll_thread_table[i].crtn -> last_stack_min);
+ current_min = stack_end;
# ifdef CPPCHECK
/* To avoid a warning that thread is always null. */
thread = (GC_thread)&dll_thread_table[i];
@@ -1028,13 +1034,15 @@ GC_INNER void GC_get_next_stack(char *start, char *limit,
GC_thread p;
for (p = GC_threads[i]; p != NULL; p = p -> tm.next) {
- ptr_t s = p -> stack_end;
+ GC_stack_context_t crtn = p -> crtn;
+ ptr_t stack_end = crtn -> stack_end; /* read of a volatile field */
- if ((word)s > (word)start && (word)s < (word)current_min) {
+ if ((word)stack_end > (word)start
+ && (word)stack_end < (word)current_min) {
/* Update address of last_stack_min. */
- plast_stack_min = &(p -> last_stack_min);
+ plast_stack_min = &(crtn -> last_stack_min);
thread = p; /* Remember current thread to unprotect. */
- current_min = s;
+ current_min = stack_end;
}
}
}