 alloc.c              |  6
 blacklst.c           | 23
 darwin_stop_world.c  |  8
 dyn_load.c           |  5
 finalize.c           | 20
 gcj_mlc.c            |  2
 include/gc/javaxfc.h |  1
 malloc.c             |  1
 mallocx.c            |  2
 mark.c               | 22
 mark_rts.c           |  8
 new_hblk.c           |  2
 os_dep.c             | 14
 pthread_stop_world.c | 16
 pthread_support.c    | 20
 thread_local_alloc.c |  5
 typd_mlc.c           |  6
 win32_threads.c      | 29
 18 files changed, 88 insertions(+), 102 deletions(-)
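The change is mechanical throughout: comments that merely state a locking discipline ("caller holds the allocation lock", "caller does not hold the lock") are deleted, and a runtime check, GC_ASSERT(I_HOLD_LOCK()) or GC_ASSERT(I_DONT_HOLD_LOCK()), is added at the top of the affected function bodies, so assertion-enabled builds verify the discipline instead of only documenting it. Below is a minimal sketch of the idea, assuming a plain pthread mutex and hypothetical names (allocator_lock, lock_holder, remove_root_asserted); the collector's real lock and I_HOLD_LOCK() are platform-specific internals and look different.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t allocator_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lock_holder;      /* meaningful only while lock_is_held */
static int lock_is_held = 0;       /* written only under the lock        */

static void LOCK(void)
{
  pthread_mutex_lock(&allocator_lock);
  lock_holder = pthread_self();
  lock_is_held = 1;
}

static void UNLOCK(void)
{
  lock_is_held = 0;
  pthread_mutex_unlock(&allocator_lock);
}

/* Simplified ownership test; a production version would use TLS or  */
/* atomics instead of a plain int flag.                              */
static int I_HOLD_LOCK(void)
{
  return lock_is_held && pthread_equal(lock_holder, pthread_self());
}

/* Before this patch: the precondition lives only in a comment.      */
/* Caller holds the allocation lock.                                 */
static void remove_root_commented(int i)
{
  (void)i;   /* ... work that requires the lock ... */
}

/* After this patch: the precondition is executable and is checked   */
/* whenever assertions are compiled in.                              */
static void remove_root_asserted(int i)
{
  assert(I_HOLD_LOCK());   /* the patch itself uses GC_ASSERT(I_HOLD_LOCK()) */
  (void)i;   /* ... work that requires the lock ... */
}

int main(void)
{
  LOCK();
  remove_root_commented(0);
  remove_root_asserted(0);
  UNLOCK();
  puts("locking discipline verified");
  return 0;
}

Compile with cc -pthread; building with -DNDEBUG makes the check vanish, which is the same property the patch relies on (see the GC_ASSERT sketch at the end of this page).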
diff --git a/alloc.c b/alloc.c
@@ -221,7 +221,7 @@ STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing */
                                 /* collection within GC_time_limit. */
 STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
-                        /* accessed holding the lock. */
+                        /* Accessed holding the allocator lock. */
 
 GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
 {
@@ -1092,8 +1092,8 @@ GC_INLINE int GC_compute_heap_usage_percent(void)
                    TO_KiB_UL(GC_composite_in_use), \
                    TO_KiB_UL(GC_atomic_in_use))
 
-/* Finish up a collection.  Assumes mark bits are consistent, lock is  */
-/* held, but the world is otherwise running.                           */
+/* Finish up a collection.  Assumes mark bits are consistent, but the  */
+/* world is otherwise running.                                         */
 STATIC void GC_finish_collection(void)
 {
 # ifndef NO_CLOCK
diff --git a/blacklst.c b/blacklst.c
@@ -24,16 +24,11 @@
  * block, even though it does not start on the dangerous block.
  */
 
-/*
- * Externally callable routines are:
-
- * GC_add_to_black_list_normal
- * GC_add_to_black_list_stack
- * GC_promote_black_lists
- * GC_is_black_listed
- *
- * All require that the allocator lock is held.
- */
+/* Externally callable routines are:    */
+/* - GC_add_to_black_list_normal,       */
+/* - GC_add_to_black_list_stack,        */
+/* - GC_promote_black_lists,            */
+/* - GC_is_black_listed.                */
 
 /* Pointers to individual tables.  We replace one table by another by  */
 /* switching these pointers.                                           */
@@ -141,6 +136,7 @@ GC_INNER void GC_promote_black_lists(void)
     word * very_old_normal_bl = GC_old_normal_bl;
     word * very_old_stack_bl = GC_old_stack_bl;
 
+    GC_ASSERT(I_HOLD_LOCK());
     GC_old_normal_bl = GC_incomplete_normal_bl;
     GC_old_stack_bl = GC_incomplete_stack_bl;
     if (!GC_all_interior_pointers) {
@@ -197,6 +193,9 @@ GC_INNER void GC_unpromote_black_lists(void)
   GC_INNER void GC_add_to_black_list_normal(word p)
 #endif
 {
+# ifndef PARALLEL_MARK
+    GC_ASSERT(I_HOLD_LOCK());
+# endif
   if (GC_modws_valid_offsets[p & (sizeof(word)-1)]) {
     word index = PHT_HASH((word)p);
 
@@ -221,6 +220,9 @@ GC_INNER void GC_unpromote_black_lists(void)
 {
   word index = PHT_HASH((word)p);
 
+# ifndef PARALLEL_MARK
+    GC_ASSERT(I_HOLD_LOCK());
+# endif
   if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
 #   ifdef PRINT_BLACK_LIST
       if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
@@ -238,6 +240,7 @@ GC_INNER void GC_unpromote_black_lists(void)
  * that every smaller value of r after h is also black listed.)
  * If (h,len) is not black listed, return 0.
  * Knows about the structure of the black list hash tables.
+ * Assumes the allocation lock is held but, by design, does not assert it.
  */
 struct hblk * GC_is_black_listed(struct hblk *h, word len)
 {
diff --git a/darwin_stop_world.c b/darwin_stop_world.c
index 175cb907..d0f89a7d 100644
--- a/darwin_stop_world.c
+++ b/darwin_stop_world.c
@@ -352,6 +352,8 @@ GC_INNER void GC_push_all_stacks(void)
   int nthreads = 0;
   word total_size = 0;
   mach_msg_type_number_t listcount = (mach_msg_type_number_t)THREAD_TABLE_SZ;
+
+  GC_ASSERT(I_HOLD_LOCK());
   if (!EXPECT(GC_thr_initialized, TRUE)) GC_thr_init();
@@ -539,13 +541,13 @@ STATIC GC_bool GC_suspend_thread_list(thread_act_array_t act_list, int count,
 
 #endif /* !GC_NO_THREADS_DISCOVERY */
 
-/* Caller holds allocation lock.        */
 GC_INNER void GC_stop_world(void)
 {
   task_t my_task = current_task();
   mach_port_t my_thread = mach_thread_self();
   kern_return_t kern_result;
 
+  GC_ASSERT(I_HOLD_LOCK());
 # ifdef DEBUG_THREADS
     GC_log_printf("Stopping the world from thread %p\n",
                   (void *)(word)my_thread);
@@ -679,11 +681,11 @@ GC_INLINE void GC_thread_resume(thread_act_t thread)
   }
 }
 
-/* Caller holds allocation lock, and has held it continuously since    */
-/* the world stopped.                                                  */
 GC_INNER void GC_start_world(void)
 {
   task_t my_task = current_task();
+
+  GC_ASSERT(I_HOLD_LOCK()); /* held continuously since the world stopped */
 # ifdef DEBUG_THREADS
     GC_log_printf("World starting\n");
 # endif
diff --git a/dyn_load.c b/dyn_load.c
@@ -1322,7 +1322,6 @@ STATIC const char *GC_dyld_name_for_hdr(const struct GC_MACH_HEADER *hdr)
   return NULL;
 }
 
-/* This should never be called by a thread holding the lock. */
 STATIC void GC_dyld_image_add(const struct GC_MACH_HEADER *hdr,
                               intptr_t slide)
 {
@@ -1333,6 +1332,7 @@ STATIC void GC_dyld_image_add(const struct GC_MACH_HEADER *hdr,
   GC_has_static_roots_func callback = GC_has_static_roots;
   DCL_LOCK_STATE;
 
+  GC_ASSERT(I_DONT_HOLD_LOCK());
   if (GC_no_dls) return;
 # ifdef DARWIN_DEBUG
     name = GC_dyld_name_for_hdr(hdr);
@@ -1392,7 +1392,6 @@ STATIC void GC_dyld_image_add(const struct GC_MACH_HEADER *hdr,
 # endif
 }
 
-/* This should never be called by a thread holding the lock. */
 STATIC void GC_dyld_image_remove(const struct GC_MACH_HEADER *hdr,
                                  intptr_t slide)
 {
@@ -1403,6 +1402,7 @@ STATIC void GC_dyld_image_remove(const struct GC_MACH_HEADER *hdr,
     DCL_LOCK_STATE;
 # endif
 
+  GC_ASSERT(I_DONT_HOLD_LOCK());
   for (i = 0; i < sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]); i++) {
     sec = GC_GETSECTBYNAME(hdr, GC_dyld_sections[i].seg,
                            GC_dyld_sections[i].sect);
@@ -1466,6 +1466,7 @@ GC_INNER void GC_init_dyld(void)
 {
   static GC_bool initialized = FALSE;
 
+  GC_ASSERT(I_DONT_HOLD_LOCK());
   if (initialized) return;
 
 # ifdef DARWIN_DEBUG
diff --git a/finalize.c b/finalize.c
@@ -88,9 +88,8 @@ GC_API void GC_CALL GC_push_finalizer_structures(void)
 #endif
 
 /* Double the size of a hash table. *log_size_ptr is the log of its    */
-/* current size. May be a no-op.                                       */
-/* *table is a pointer to an array of hash headers. If we succeed, we  */
-/* update both *table and *log_size_ptr. Lock is held.                 */
+/* current size. May be a no-op. *table is a pointer to an array of    */
+/* hash headers. We update both *table and *log_size_ptr on success.   */
 STATIC void GC_grow_table(struct hash_chain_entry ***table,
                           unsigned *log_size_ptr, word *entries_ptr)
 {
@@ -293,8 +292,8 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
 
 /* Mark from one finalizable object using the specified mark proc.     */
 /* May not mark the object pointed to by real_ptr (i.e, it is the job  */
 /* of the caller, if appropriate). Note that this is called with the   */
-/* mutator running, but we hold the GC lock. This is safe only         */
-/* if the mutator (client) gets the GC lock to reveal hidden pointers. */
+/* mutator running. This is safe only if the mutator (client) gets     */
+/* the allocation lock to reveal hidden pointers.                      */
 GC_INLINE void GC_mark_fo(ptr_t real_ptr, finalization_mark_proc mark_proc)
 {
   GC_ASSERT(I_HOLD_LOCK());
@@ -522,7 +521,6 @@ GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
 #endif /* !GC_LONG_REFS_NOT_NEEDED */
 
 #ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
-  /* Moves a link.  Assume the lock is held.    */
  STATIC int GC_move_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl,
                                void **link, void **new_link)
@@ -698,7 +696,6 @@ STATIC void GC_register_finalizer_inner(void * obj,
       GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                          1U << GC_log_fo_table_size);
     }
-    /* in the THREADS case we hold allocation lock.     */
     for (;;) {
       struct finalizable_object *prev_fo = NULL;
       GC_oom_func oom_fn;
@@ -927,7 +924,7 @@ GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
     *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
     return (unsigned char *)&GC_finalizer_nested;
   }
-#endif /* THREADS */
+#endif /* !THREADS */
 
 GC_INLINE void GC_make_disappearing_links_disappear(
                                 struct dl_hashtbl_s* dl_hashtbl,
@@ -983,9 +980,8 @@ GC_INLINE void GC_make_disappearing_links_disappear(
     GC_dirty(dl_hashtbl -> head); /* entire object */
 }
 
-/* Called with held lock (but the world is running).                   */
 /* Cause disappearing links to disappear and unreachable objects to be */
-/* enqueued for finalization.                                          */
+/* enqueued for finalization. Called with the world running.           */
 GC_INNER void GC_finalize(void)
 {
   struct finalizable_object * curr_fo, * prev_fo, * next_fo;
@@ -1208,8 +1204,6 @@ GC_INNER void GC_finalize(void)
  * may have been finalized when these finalizers are run.
  * Finalizers run at this point must be prepared to deal with a
  * mostly broken world.
- * This routine is externally callable, so is called without
- * the allocation lock.
  */
 GC_API void GC_CALL GC_finalize_all(void)
 {
@@ -1244,13 +1238,13 @@ GC_API int GC_CALL GC_should_invoke_finalizers(void)
 }
 
 /* Invoke finalizers for all objects that are ready to be finalized.   */
-/* Should be called without allocation lock.                           */
 GC_API int GC_CALL GC_invoke_finalizers(void)
 {
   int count = 0;
   word bytes_freed_before = 0; /* initialized to prevent warning. */
   DCL_LOCK_STATE;
 
+  GC_ASSERT(I_DONT_HOLD_LOCK());
   while (GC_should_invoke_finalizers()) {
     struct finalizable_object * curr_fo;
diff --git a/gcj_mlc.c b/gcj_mlc.c
@@ -54,7 +54,6 @@ STATIC struct GC_ms_entry * GC_gcj_fake_mark_proc(word * addr GC_ATTR_UNUSED,
     return mark_stack_ptr;
 }
 
-/* Caller does not hold allocation lock. */
 GC_API void GC_CALL GC_init_gcj_malloc(int mp_index,
                                        void * /* really GC_mark_proc */mp)
 {
@@ -130,6 +129,7 @@ static void maybe_finalize(void)
    static word last_finalized_no = 0;
    DCL_LOCK_STATE;
 
+   GC_ASSERT(I_HOLD_LOCK());
    if (GC_gc_no == last_finalized_no ||
        !EXPECT(GC_is_initialized, TRUE)) return;
    UNLOCK();
diff --git a/include/gc/javaxfc.h b/include/gc/javaxfc.h
index 4ad46bfe..d6e3bb41 100644
--- a/include/gc/javaxfc.h
+++ b/include/gc/javaxfc.h
@@ -40,6 +40,7 @@
  * finalizers which create new finalizable objects, though that's
  * probably unlikely.
  * Thus this is not recommended for general use.
+ * Acquires the allocation lock (to enqueue all finalizers).
  */
 GC_API void GC_CALL GC_finalize_all(void);
diff --git a/malloc.c b/malloc.c
@@ -638,6 +638,7 @@ GC_API void GC_CALL GC_free(void * p)
     int knd;
     struct obj_kind * ok;
 
+    GC_ASSERT(I_HOLD_LOCK());
     h = HBLKPTR(p);
     hhdr = HDR(h);
     knd = hhdr -> hb_obj_kind;
diff --git a/mallocx.c b/mallocx.c
@@ -413,7 +413,7 @@ GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
             if (GC_fl_builder_count == 0) GC_notify_all_builder();
             GC_release_mark_lock();
             LOCK();
-            /* GC lock is needed for reclaim list access.  We      */
+            /* The GC lock is needed for reclaim list access.  We  */
             /* must decrement fl_builder_count before reacquiring  */
             /* the lock.  Hopefully this path is rare.             */
diff --git a/mark.c b/mark.c
@@ -293,14 +293,12 @@ static void push_roots_and_advance(GC_bool push_all, ptr_t cold_gc_frame)
 /* Cold_gc_frame is an address inside a GC frame that   */
 /* remains valid until all marking is complete.         */
 /* A zero value indicates that it's OK to miss some     */
-/* register values.                                     */
-/* We hold the allocation lock.  In the case of         */
-/* incremental collection, the world may not be stopped.*/
+/* register values.  In the case of an incremental      */
+/* collection, the world may be running.                */
 #ifdef WRAP_MARK_SOME
   /* For win32, this is called after we establish a structured  */
   /* exception handler, in case Windows unmaps one of our root  */
-  /* segments.  See below.  In either case, we acquire the      */
-  /* allocator lock long before we get here.                    */
+  /* segments.                                                  */
   STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
 #else
   GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
@@ -1192,14 +1190,13 @@ STATIC void GC_mark_local(mse *local_mark_stack, int id)
     }
 }
 
-/* Perform Parallel mark.                       */
-/* We hold the GC lock, not the mark lock.      */
-/* Currently runs until the mark stack is       */
-/* empty.                                       */
+/* Perform parallel mark.  We hold the GC lock, not the mark lock.     */
+/* Currently runs until the mark stack is empty.                       */
 STATIC void GC_do_parallel_mark(void)
 {
     GC_acquire_mark_lock();
+    GC_ASSERT(I_HOLD_LOCK());
     /* This could be a GC_ASSERT, but it seems safer to keep it on     */
     /* all the time, especially since it's cheap.                      */
     if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
@@ -1226,10 +1223,8 @@ STATIC void GC_do_parallel_mark(void)
     GC_notify_all_marker();
 }
 
-
-/* Try to help out the marker, if it's running.         */
-/* We do not hold the GC lock, but the requestor does.  */
-/* And we hold the mark lock.                           */
+/* Try to help out the marker, if it's running.  We hold the mark lock */
+/* only; the initiating thread holds the allocation lock.              */
 GC_INNER void GC_help_marker(word my_mark_no)
 {
 #   define my_id my_id_mse.mse_descr.w
@@ -1237,6 +1232,7 @@ GC_INNER void GC_help_marker(word my_mark_no)
     mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
                 /* Note: local_mark_stack is quite big (up to 128 KiB). */
 
+    GC_ASSERT(I_DONT_HOLD_LOCK());
     GC_ASSERT(GC_parallel);
     while (GC_mark_no < my_mark_no
            || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
diff --git a/mark_rts.c b/mark_rts.c
@@ -288,9 +288,9 @@ GC_API void GC_CALL GC_clear_roots(void)
     UNLOCK();
 }
 
-/* Internal use only; lock held.        */
 STATIC void GC_remove_root_at_pos(int i)
 {
+    GC_ASSERT(I_HOLD_LOCK());
 #   ifdef DEBUG_ADD_DEL_ROOTS
       GC_log_printf("Remove data root section at %d: %p .. %p%s\n",
                     i, (void *)GC_static_roots[i].r_start,
@@ -354,7 +354,6 @@ GC_API void GC_CALL GC_remove_roots(void *b, void *e)
     UNLOCK();
 }
 
-/* Should only be called when the lock is held */
 STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
 {
     int i;
@@ -362,6 +361,7 @@ STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
       int old_n_roots = n_root_sets;
 #   endif
 
+    GC_ASSERT(I_HOLD_LOCK());
     for (i = 0; i < n_root_sets; ) {
         if ((word)GC_static_roots[i].r_start >= (word)b
             && (word)GC_static_roots[i].r_end <= (word)e) {
@@ -562,13 +562,13 @@ STATIC struct exclusion * GC_next_exclusion(ptr_t start_addr)
     return GC_excl_table + low;
 }
 
-/* Should only be called when the lock is held.  The range boundaries  */
-/* should be properly aligned and valid.                               */
+/* The range boundaries should be properly aligned and valid.          */
 GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish)
 {
     struct exclusion * next;
     size_t next_index;
 
+    GC_ASSERT(I_HOLD_LOCK());
     GC_ASSERT((word)start % sizeof(word) == 0);
     GC_ASSERT((word)start < (word)finish);
diff --git a/new_hblk.c b/new_hblk.c
@@ -103,7 +103,7 @@
 /* Build a free list for objects of size sz inside heap block h.       */
 /* Clear objects inside h if clear is set.  Add list to the end of     */
 /* the free list we build.  Return the new free list.                  */
-/* This could be called without the main GC lock, if we ensure that    */
+/* This could be called without the allocation lock, if we ensure that */
 /* there is no concurrent collection which might reclaim objects that  */
 /* we have not yet allocated.                                          */
 GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t sz, GC_bool clear,
diff --git a/os_dep.c b/os_dep.c
@@ -541,7 +541,6 @@ GC_INNER const char * GC_get_maps(void)
   /* works-around the issues.                                  */
 
   /* Return the first non-addressable location > p or bound.   */
-  /* Requires the allocation lock.                             */
   STATIC ptr_t GC_find_limit_openbsd(ptr_t p, ptr_t bound)
   {
     static volatile ptr_t result;
@@ -587,7 +586,6 @@ GC_INNER const char * GC_get_maps(void)
   static volatile int firstpass;
 
   /* Return first addressable location > p or bound.    */
-  /* Requires the allocation lock.                      */
   STATIC ptr_t GC_skip_hole_openbsd(ptr_t p, ptr_t bound)
   {
     static volatile ptr_t result;
@@ -981,15 +979,11 @@ GC_INNER size_t GC_page_size = 0;
     /* Return the first non-addressable location > p (up) or    */
     /* the smallest location q s.t. [q,p) is addressable (!up). */
     /* We assume that p (up) or p-1 (!up) is addressable.       */
-    /* Requires allocation lock.                                */
     GC_ATTR_NO_SANITIZE_ADDR
     STATIC ptr_t GC_find_limit_with_bound(ptr_t p, GC_bool up, ptr_t bound)
     {
         static volatile ptr_t result;
-                /* Safer if static, since otherwise it may not be      */
-                /* preserved across the longjmp.  Can safely be        */
-                /* static since it's only called with the              */
-                /* allocation lock held.                               */
+                /* Safer if static, see the comment in GC_find_limit_openbsd. */
 
         GC_ASSERT(up ? (word)bound >= MIN_PAGE_SIZE
                      : (word)bound <= ~(word)MIN_PAGE_SIZE);
@@ -1578,7 +1572,7 @@ GC_INNER size_t GC_page_size = 0;
 /* Register static data segment(s) as roots.  If more data segments are */
 /* added later then they need to be registered at that point (as we do  */
 /* with SunOS dynamic loading), or GC_mark_roots needs to check for     */
-/* them (as we do with PCR).  Called with allocator lock held.          */
+/* them (as we do with PCR).                                            */
 # ifdef OS2
 
 void GC_register_data_segments(void)
@@ -3074,8 +3068,6 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void)
 #endif /* !GWW_VDB && !SOFT_VDB */
 
 #ifdef DEFAULT_VDB
-  /* All of the following assume the allocation lock is held.  */
-
   /* The client asserts that unallocated pages in the heap are never   */
   /* written.                                                          */
 
@@ -5178,7 +5170,7 @@ GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
 
 #ifdef NEED_CALLINFO
 
-/* Print info to stderr.  We do NOT hold the allocation lock */
+/* Print info to stderr.  We do NOT hold the allocation lock.  */
 GC_INNER void GC_print_callers(struct callinfo info[NFRAMES])
 {
     int i;
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 4f7a9c78..3b0ac546 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -772,8 +772,8 @@ STATIC void GC_restart_handler(int sig)
 # undef ao_store_release_async
 #endif /* !GC_OPENBSD_UTHREADS && !NACL */
 
-/* We hold allocation lock.  Should do exactly the right thing if the  */
-/* world is stopped.  Should not fail if it isn't.                     */
+/* Should do exactly the right thing if the world is stopped; should   */
+/* not fail if it is not.                                              */
 GC_INNER void GC_push_all_stacks(void)
 {
     GC_bool found_me = FALSE;
@@ -789,6 +789,7 @@ GC_INNER void GC_push_all_stacks(void)
     pthread_t self = pthread_self();
     word total_size = 0;
 
+    GC_ASSERT(I_HOLD_LOCK());
     if (!EXPECT(GC_thr_initialized, TRUE)) GC_thr_init();
 #   ifdef DEBUG_THREADS
@@ -910,9 +911,8 @@ GC_INNER void GC_push_all_stacks(void)
   int GC_stopping_pid = 0;
 #endif
 
-/* We hold the allocation lock.  Suspend all threads that might        */
-/* still be running.  Return the number of suspend signals that        */
-/* were sent.                                                          */
+/* Suspend all threads that might still be running.  Return the number */
+/* of suspend signals that were sent.                                  */
 STATIC int GC_suspend_all(void)
 {
   int n_live_threads = 0;
@@ -925,6 +925,7 @@ STATIC int GC_suspend_all(void)
     GC_ASSERT((GC_stop_count & THREAD_RESTARTED) == 0);
 # endif
 
+  GC_ASSERT(I_HOLD_LOCK());
   for (i = 0; i < THREAD_TABLE_SZ; i++) {
     for (p = GC_threads[i]; p != 0; p = p -> next) {
       if (!THREAD_EQUAL(p -> id, self)) {
@@ -989,6 +990,7 @@ STATIC int GC_suspend_all(void)
 #   endif
     unsigned long num_sleeps = 0;
 
+    GC_ASSERT(I_HOLD_LOCK());
 #   ifdef DEBUG_THREADS
       GC_log_printf("pthread_stop_world: number of threads: %d\n",
                     GC_nacl_num_gc_threads - 1);
@@ -1303,14 +1305,12 @@ GC_INNER void GC_stop_world(void)
 }
 #endif /* !NACL */
 
-/* Caller holds allocation lock, and has held it continuously since    */
-/* the world stopped.                                                  */
 GC_INNER void GC_start_world(void)
 {
 # ifndef NACL
    int n_live_threads;
 
-   GC_ASSERT(I_HOLD_LOCK());
+   GC_ASSERT(I_HOLD_LOCK()); /* held continuously since the world stopped */
 # ifdef DEBUG_THREADS
     GC_log_printf("World starting\n");
 # endif
diff --git a/pthread_support.c b/pthread_support.c
index e73cd894..bebbf92e 100644
--- a/pthread_support.c
+++ b/pthread_support.c
@@ -589,13 +589,13 @@ void GC_push_thread_structures(void)
 #endif /* DEBUG_THREADS */
 
 /* Add a thread to GC_threads.  We assume it wasn't already there.     */
-/* Caller holds allocation lock.                                       */
 STATIC GC_thread GC_new_thread(pthread_t id)
 {
     int hv = THREAD_TABLE_INDEX(id);
     GC_thread result;
     static GC_bool first_thread_used = FALSE;
 
+    GC_ASSERT(I_HOLD_LOCK());
 #   ifdef DEBUG_THREADS
       GC_log_printf("Creating thread %p\n", (void *)id);
       for (result = GC_threads[hv]; result != NULL; result = result->next)
@@ -604,7 +604,6 @@ STATIC GC_thread GC_new_thread(pthread_t id)
           break;
       }
 #   endif
-    GC_ASSERT(I_HOLD_LOCK());
     if (!EXPECT(first_thread_used, TRUE)) {
       result = &first_thread;
       first_thread_used = TRUE;
@@ -708,8 +707,7 @@ STATIC void GC_delete_gc_thread(GC_thread t)
 
 /* Return a GC_thread corresponding to a given pthread_t.      */
 /* Returns 0 if it's not there.                                */
-/* Caller holds allocation lock or otherwise inhibits          */
-/* updates.                                                    */
+/* Caller holds allocation lock or otherwise inhibits updates. */
 /* If there is more than one thread with the given id we       */
 /* return the most recent one.                                 */
 GC_INNER GC_thread GC_lookup_thread(pthread_t id)
@@ -734,13 +732,15 @@ GC_INNER GC_thread GC_lookup_thread(pthread_t id)
   /* Returns NULL if GC_invoke_finalizers() should not be called by the */
   /* collector (to minimize the risk of a deep finalizers recursion),   */
   /* otherwise returns a pointer to the thread-local finalizer_nested.  */
-  /* Called by GC_notify_or_invoke_finalizers() only (the GC lock is    */
-  /* held).                                                             */
+  /* Called by GC_notify_or_invoke_finalizers() only.                   */
  GC_INNER unsigned char *GC_check_finalizer_nested(void)
  {
-   GC_thread me = GC_lookup_thread(pthread_self());
-   unsigned nesting_level = me->finalizer_nested;
+   GC_thread me;
+   unsigned nesting_level;
+
+   GC_ASSERT(I_HOLD_LOCK());
+   me = GC_lookup_thread(pthread_self());
+   nesting_level = me->finalizer_nested;
    if (nesting_level) {
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
@@ -1484,14 +1484,14 @@ GC_INNER void GC_thr_init(void)
 
 /* Perform all initializations, including those that    */
 /* may require allocation.                              */
-/* Called without allocation lock.                      */
 /* Must be called before a second thread is created.    */
-/* Did we say it's called without the allocation lock?  */
 GC_INNER void GC_init_parallel(void)
 {
 #   if defined(THREAD_LOCAL_ALLOC)
      DCL_LOCK_STATE;
 #   endif
+
+    GC_ASSERT(I_DONT_HOLD_LOCK());
    if (parallel_initialized) return;
    parallel_initialized = TRUE;
diff --git a/thread_local_alloc.c b/thread_local_alloc.c
index 73e1e69c..10ea2063 100644
--- a/thread_local_alloc.c
+++ b/thread_local_alloc.c
@@ -53,8 +53,7 @@ static void return_single_freelist(void *fl, void **gfl)
     }
 }
 
-/* Recover the contents of the freelist array fl into the global one gfl.*/
-/* We hold the allocator lock.                                           */
+/* Recover the contents of the freelist array fl into the global one gfl. */
 static void return_freelists(void **fl, void **gfl)
 {
     int i;
@@ -128,11 +127,11 @@ GC_INNER void GC_init_thread_local(GC_tlfs p)
 # endif
 }
 
-/* We hold the allocator lock.  */
 GC_INNER void GC_destroy_thread_local(GC_tlfs p)
 {
     int k;
 
+    GC_ASSERT(I_HOLD_LOCK());
     /* We currently only do this from the thread itself.        */
     GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS <= MAXOBJKINDS);
     for (k = 0; k < THREAD_FREELISTS_KINDS; ++k) {
diff --git a/typd_mlc.c b/typd_mlc.c
@@ -96,10 +96,8 @@ STATIC void GC_push_typed_structures_proc(void)
   GC_PUSH_ALL_SYM(GC_ext_descriptors);
 }
 
-/* Add a multiword bitmap to GC_ext_descriptors arrays.  Return        */
-/* starting index.                                                     */
-/* Returns -1 on failure.                                              */
-/* Caller does not hold allocation lock.                               */
+/* Add a multiword bitmap to GC_ext_descriptors arrays.                */
+/* Returns starting index on success, -1 otherwise.                    */
 STATIC signed_word GC_add_ext_descriptor(const word * bm, word nbits)
 {
     size_t nwords = divWORDSZ(nbits + WORDSZ-1);
diff --git a/win32_threads.c b/win32_threads.c
index 01929b9a..80b8c637 100644
--- a/win32_threads.c
+++ b/win32_threads.c
@@ -360,19 +360,18 @@ static struct GC_Thread_Rep first_thread;
 static GC_bool first_thread_used = FALSE;
 
 /* Add a thread to GC_threads.  We assume it wasn't already there.     */
-/* Caller holds allocation lock.                                       */
 /* Unlike the pthreads version, the id field is set by the caller.     */
 STATIC GC_thread GC_new_thread(DWORD id)
 {
   int hv = THREAD_TABLE_INDEX(id);
   GC_thread result;
 
+  GC_ASSERT(I_HOLD_LOCK());
 # ifdef DEBUG_THREADS
     GC_log_printf("Creating thread 0x%lx\n", (long)id);
     if (GC_threads[hv] != NULL)
      GC_log_printf("Hash collision at GC_threads[%d]\n", hv);
 # endif
-  GC_ASSERT(I_HOLD_LOCK());
   if (!EXPECT(first_thread_used, TRUE)) {
     result = &first_thread;
     first_thread_used = TRUE;
@@ -543,8 +542,6 @@ GC_INLINE LONG GC_get_max_thread_index(void)
 /* without a lock, but should be called in contexts in which the       */
 /* requested thread cannot be asynchronously deleted, e.g. from the    */
 /* thread itself.                                                      */
-/* This version assumes that either GC_win32_dll_threads is set, or    */
-/* we hold the allocator lock.                                         */
 /* Also used (for assertion checking only) from thread_local_alloc.c.  */
 STATIC GC_thread GC_lookup_thread_inner(DWORD thread_id)
 {
@@ -564,9 +561,10 @@ STATIC GC_thread GC_lookup_thread_inner(DWORD thread_id)
   }
   else
 # endif
   /* else */ {
-    GC_thread p = GC_threads[THREAD_TABLE_INDEX(thread_id)];
+    GC_thread p;
 
     GC_ASSERT(I_HOLD_LOCK());
+    p = GC_threads[THREAD_TABLE_INDEX(thread_id)];
     while (p != NULL && p -> id != thread_id)
      p = p -> tm.next;
     return p;
@@ -594,9 +592,11 @@ STATIC GC_thread GC_lookup_thread_inner(DWORD thread_id)
 /* GC_check_finalizer_nested() is the same as in pthread_support.c.    */
 GC_INNER unsigned char *GC_check_finalizer_nested(void)
 {
-  GC_thread me = GC_lookup_thread_inner(GetCurrentThreadId());
+  GC_thread me;
   unsigned nesting_level;
 
+  GC_ASSERT(I_HOLD_LOCK());
+  me = GC_lookup_thread_inner(GetCurrentThreadId());
   CHECK_LOOKUP_MY_THREAD(me);
   nesting_level = me->finalizer_nested;
   if (nesting_level) {
@@ -677,9 +677,7 @@ GC_API void GC_CALL GC_register_altstack(void *stack GC_ATTR_UNUSED,
 /* been notified, then there may be more than one thread       */
 /* in the table with the same win32 id.                        */
 /* This is OK, but we need a way to delete a specific one.     */
-/* Assumes we hold the allocation lock unless                  */
-/* GC_win32_dll_threads is set.  Does not actually free        */
-/* GC_thread entry (only unlinks it).                          */
+/* Does not actually free GC_thread entry, only unlinks it.    */
 /* If GC_win32_dll_threads is set it should be called from the */
 /* thread being deleted.                                       */
 STATIC void GC_delete_gc_thread_no_free(GC_vthread t)
@@ -727,8 +725,7 @@ STATIC void GC_delete_gc_thread_no_free(GC_vthread t)
 }
 
 /* Delete a thread from GC_threads.  We assume it is there.    */
-/* (The code intentionally traps if it wasn't.)  Assumes we    */
-/* hold the allocation lock unless GC_win32_dll_threads is set.*/
+/* (The code intentionally traps if it wasn't.)                */
 /* If GC_win32_dll_threads is set then it should be called from */
 /* the thread being deleted.  It is also safe to delete the    */
 /* main thread (unless GC_win32_dll_threads).                  */
@@ -1068,9 +1065,9 @@ GC_API void * GC_CALL GC_get_my_stackbottom(struct GC_stack_base *sb)
   /* We assume that this is only called for pthread ids that   */
   /* have not yet terminated or are still joinable, and        */
   /* cannot be concurrently terminated.                        */
-  /* Assumes we do NOT hold the allocation lock.               */
   STATIC GC_thread GC_lookup_pthread(pthread_t id)
   {
+    GC_ASSERT(I_DONT_HOLD_LOCK());
 #   ifndef GC_NO_THREADS_DISCOVERY
      if (GC_win32_dll_threads) {
        int i;
@@ -1806,8 +1803,8 @@ STATIC word GC_push_stack_for(GC_thread thread, DWORD me)
   return thread->stack_base - sp; /* stack grows down */
 }
 
-/* We hold allocation lock.  Should do exactly the right thing if the  */
-/* world is stopped.  Should not fail if it isn't.                     */
+/* Should do exactly the right thing if the world is stopped; should   */
+/* not fail if it is not.                                              */
 GC_INNER void GC_push_all_stacks(void)
 {
   DWORD thread_id = GetCurrentThreadId();
@@ -1816,6 +1813,8 @@ GC_INNER void GC_push_all_stacks(void)
     unsigned nthreads = 0;
 # endif
   word total_size = 0;
+
+  GC_ASSERT(I_HOLD_LOCK());
 # ifndef GC_NO_THREADS_DISCOVERY
   if (GC_win32_dll_threads) {
     int i;
@@ -3205,7 +3204,6 @@ GC_INNER void GC_thr_init(void)
 
 /* Perform all initializations, including those that    */
 /* may require allocation.                              */
-/* Called without allocation lock.                      */
 /* Must be called before a second thread is created.    */
 GC_INNER void GC_init_parallel(void)
 {
@@ -3214,6 +3212,7 @@ GC_INNER void GC_init_parallel(void)
     DCL_LOCK_STATE;
 # endif
 
+  GC_ASSERT(I_DONT_HOLD_LOCK());
   if (parallel_initialized) return;
   parallel_initialized = TRUE;
   /* GC_init() calls us back, so set flag first. */
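These added checks cost nothing in ordinary builds: GC_ASSERT expands to nothing unless the collector is compiled with assertions enabled (bdwgc's GC_ASSERTIONS macro). A sketch of such a conditional definition follows; the failure-message format is illustrative, not the library's exact wording.

#include <stdio.h>
#include <stdlib.h>

#ifdef GC_ASSERTIONS
  /* Debug builds: evaluate the condition and abort on failure.   */
# define GC_ASSERT(expr) \
            do { \
              if (!(expr)) { \
                fprintf(stderr, "Assertion failure: %s:%d\n", \
                        __FILE__, __LINE__); \
                abort(); \
              } \
            } while (0)
#else
  /* Release builds: the check disappears entirely.               */
# define GC_ASSERT(expr)
#endif

int main(void)
{
  int i_hold_lock = 1;     /* stand-in for a real I_HOLD_LOCK() query */

  GC_ASSERT(i_hold_lock);  /* checked only when built with -DGC_ASSERTIONS */
  (void)i_hold_lock;
  return 0;
}

This is why the commit can place assertions even on hot paths such as GC_push_all_stacks and GC_free: release configurations see no extra code at all, while debugging builds catch any caller that violates the locking discipline.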