diff options
author:    Jim Blandy <jimb@redhat.com>  2004-12-03 21:36:40 +0000
committer: Jim Blandy <jimb@redhat.com>  2004-12-03 21:36:40 +0000
commit:    fe0b89bfb2c28baacc81ef2d6285328d2df4c1ab (patch)
tree:      935de8e26ba986a47145ded4b5820290676c3e50
parent:    ee96d8f73ef108101f6ec60004d4400a6debf584 (diff)
download:  gdb-fe0b89bfb2c28baacc81ef2d6285328d2df4c1ab.tar.gz
Remove code to handle threads exiting without generating a wait
status; that was a kernel bug, not an intended feature, and
there's no easy way to support both.
* lwp-pool.c (lwp_pool_continue_and_drop): Delete function.
* lwp-pool.h (lwp_pool_continue_and_drop): Delete declaration.
* thread-db.c (enum death_state): Delete type.
(struct gdbserv_thread): Remove 'death_state' member.
(add_thread_to_list): Don't initialize it.
(death_state_str, debug_report_death_state_change,
death_state_got_event, death_state_notified): Delete functions.
(handle_thread_db_event): Don't handle death events specially, and
don't handle the notifying thread specially.
(continue_thread): Don't treat threads whose death has been
foretold and who have completed their notification specially.
-rw-r--r--  rda/unix/ChangeLog   |  17
-rw-r--r--  rda/unix/lwp-pool.c  |  28
-rw-r--r--  rda/unix/lwp-pool.h  |  17
-rw-r--r--  rda/unix/thread-db.c | 173
4 files changed, 19 insertions(+), 216 deletions(-)
diff --git a/rda/unix/ChangeLog b/rda/unix/ChangeLog index e5a541ad336..fbcb15bd13d 100644 --- a/rda/unix/ChangeLog +++ b/rda/unix/ChangeLog @@ -1,3 +1,20 @@ +2004-12-03 Jim Blandy <jimb@redhat.com> + + Remove code to handle threads exiting without generating a wait + status; that was a kernel bug, not an intended feature, and + there's no easy way to support both. + * lwp-pool.c (lwp_pool_continue_and_drop): Delete function. + * lwp-pool.h (lwp_pool_continue_and_drop): Delete declaration. + * thread-db.c (enum death_state): Delete type. + (struct gdbserv_thread): Remove 'death_state' member. + (add_thread_to_list): Don't initialize it. + (death_state_str, debug_report_death_state_change, + death_state_got_event, death_state_notified): Delete functions. + (handle_thread_db_event): Don't handle death events specially, and + don't handle the notifying thread specially. + (continue_thread): Don't treat threads whose death has been + foretold and who have completed their notification specially. + 2004-12-02 Jim Blandy <jimb@redhat.com> * lwp-pool.c: Doc fixes. (Use "LWP" instead of "thread".) 
diff --git a/rda/unix/lwp-pool.c b/rda/unix/lwp-pool.c index f4b9abf275e..e90dfbd98e9 100644 --- a/rda/unix/lwp-pool.c +++ b/rda/unix/lwp-pool.c @@ -1346,34 +1346,6 @@ lwp_pool_continue_lwp (pid_t pid, int signal) int -lwp_pool_continue_and_drop_lwp (pid_t pid, int signal) -{ - struct lwp *l = hash_find_known (pid); - int result = 0; - - if (debug_lwp_pool) - fprintf (stderr, "lwp_pool_continue_and_drop_lwp (%d, %d)\n", - (int) pid, signal); - - result = lwp_pool_continue_lwp (l->pid, signal); - if (result == 0) - { - hash_delete (l); - if (l->next) - queue_delete (l); - free (l); - - if (debug_lwp_pool) - fprintf (stderr, - " stopped -- %d --> freed\n", - (int) pid); - } - - return result; -} - - -int lwp_pool_singlestep_lwp (struct gdbserv *serv, pid_t lwp, int signal) { struct lwp *l = hash_find_known (lwp); diff --git a/rda/unix/lwp-pool.h b/rda/unix/lwp-pool.h index bee178777d4..1edfba9bffe 100644 --- a/rda/unix/lwp-pool.h +++ b/rda/unix/lwp-pool.h @@ -108,23 +108,6 @@ void lwp_pool_continue_all (void); int lwp_pool_continue_lwp (pid_t pid, int signal); -/* Continue LWP, and forget about it entirely. LWP should not produce - any further wait statuses. If SIGNAL is non-zero, continue it with - signal SIGNAL. Return zero on success, -1 on failure. (On - failure, the LWP is still present in the LWP pool.) - - Under NPTL, LWP's simply disappear, without becoming a zombie or - producing any wait status. At the kernel level, we have no way of - knowing that the LWP's PID is now free and may be reused --- - perhaps by an entirely different program! However, libthread_db - can provide its clients with events that indicate when a thread is - about to die; we must rely on that information to help us keep our - LWP table clean. Otherwise, we may find ourselves sending signals - to processes we don't have any relation to, and certainly won't get - wait statuses for. 
*/ -int lwp_pool_continue_and_drop_lwp (pid_t pid, int signal); - - /* Continue PID in SERV for one instruction, delivering SIGNAL if it is non-zero, and stop with SIGSTOP if/when that instruction has been completed. diff --git a/rda/unix/thread-db.c b/rda/unix/thread-db.c index 5ea9aeabbea..74e80256e3b 100644 --- a/rda/unix/thread-db.c +++ b/rda/unix/thread-db.c @@ -184,57 +184,6 @@ next_undefined_symbol (void) */ -/* Under NPTL, LWP's simply disappear, without becoming a zombie or - producing any wait status. At the kernel level, we have no way of - knowing that the LWP's PID is now free and may be reused --- - perhaps by an entirely different program! So we need to use the - death events from libthread_db to help us make the right calls to - lwp_pool_continue_and_drop_lwp to keep our LWP table clean. - - There are two steps to delivering a TD_DEATH event: - - - first, the thread sends enqueues the event. - - - then, the thread takes some pre-negotiated action (hitting a - breakpoint; making a system call) to notify libthread_db's client - that there are events queued it should attend to. - - What's tricky here is that the queueing of the event and the - notification are not synchronized. Several threads could queue - events, and then perform their notification actions simultaneously. - So RDA could easily find TD_DEATH events for several threads in the - queue when the first of those threads performs its notification. - We need to continue to manage the remaining threads whose death is - foretold (are there any named Santiago?) until they have each - completed their notifications. - - And since RDA consumes all the events each time a notification is - received, we should be prepared to receive notifications even when - the queue is empty as well. - - 'enum death_state' helps us keep track of the state of a given - thread, so we can call lwp_pool_continue_and_drop_lwp on a thread - when its death has been foretold, and it has completed its - notification. 
*/ - - -/* The thread_db death state. */ -enum death_state { - - /* We've received no indication that this thread will exit. */ - death_state_alive, - - /* We've received a TD_DEATH event for this thread, but it hasn't - completed its event notification yet. */ - death_state_event_received, - - /* We've received a TD_DEATH event for this thread, and it has - completed its event notification; we will continue it next using - lwp_pool_continue_and_drop_lwp. */ - death_state_drop_when_continued -}; - - /* Define the struct gdbserv_thread object. */ struct gdbserv_thread { @@ -289,9 +238,6 @@ struct gdbserv_thread { it actually is: they're all zombies. */ td_thrinfo_t ti; - /* The death state for this thread. */ - enum death_state death_state; - struct gdbserv_thread *next; } *thread_list; @@ -307,7 +253,6 @@ add_thread_to_list (td_thrinfo_t *ti) /* First cut -- add to start of list. */ memset (new, 0, sizeof (*new)); memcpy (&new->ti, ti, sizeof (td_thrinfo_t)); - new->death_state = death_state_alive; new->next = thread_list; thread_list = new; return new; @@ -1756,83 +1701,6 @@ thread_db_thread_info (struct gdbserv *serv, struct gdbserv_thread *thread) } -static const char * -death_state_str (enum death_state d) -{ - switch (d) - { - case death_state_alive: return "alive"; - case death_state_event_received: return "event_received"; - case death_state_drop_when_continued: return "drop_when_continued"; - default: - { - static char buf[100]; - sprintf (buf, "%d (unrecognized death_state)", d); - return buf; - } - } -} - - -static void -debug_report_death_state_change (struct gdbserv_thread *thread, - enum death_state old, - enum death_state new) -{ - if (thread_db_noisy && old != new) - fprintf (stderr, - "%19s -- %s -> %s\n", - death_state_str (old), - thread_debug_name (thread), - death_state_str (new)); -} - - -/* Record the fact that a TD_DEATH event was received for TID. 
*/ -static void -death_state_got_event (thread_t tid) -{ - struct gdbserv_thread *thread = thread_list_lookup_by_tid (tid); - enum death_state old_state; - - if (thread_db_noisy) - fprintf (stderr, "death_state_got_event (0x%x)\n", (unsigned int) tid); - - if (! thread) - fprintf (stderr, "ERROR: death event for unknown thread 0x%x\n", - (unsigned int) tid); - - old_state = thread->death_state; - - if (thread->death_state == death_state_alive) - thread->death_state = death_state_event_received; - - debug_report_death_state_change (thread, old_state, thread->death_state); -} - - -/* Record the fact that THREAD has completed an event notification. - Call this for every thread that does an event notification, even - if there were no messages actually received, or if none of them - were TD_DEATH messages, or if none applied to the notifying thread. - The description of 'enum death_state' explains why. */ -static void -death_state_notified (struct gdbserv_thread *thread) -{ - enum death_state old_state; - - if (thread_db_noisy) - fprintf (stderr, "death_state_notified (%p)\n", thread); - - old_state = thread->death_state; - - if (thread->death_state == death_state_event_received) - thread->death_state = death_state_drop_when_continued; - - debug_report_death_state_change (thread, old_state, thread->death_state); -} - - /* If we are using the libthread_db event interface, and PROCESS is stopped at an event breakpoint, handle the event. @@ -1887,33 +1755,7 @@ handle_thread_db_event (struct child_process *process) Every time thread_db_check_child_state gets a wait status from waitpid, we call update_thread_list, so our list is always up to date; we don't actually need to do anything with - these messages for our own sake. - - However, the LWP pool module needs to be told when threads - are about to exit, since NPTL gives no kernel-level - indication of this. Threads just disappear. 
- - (Ignore the question, for now, of how RDA loses when threads - spawn off new threads after we've updated our list, but - before we've managed to send each of the LWP's a - SIGSTOP.) */ - - if (msg.event == TD_DEATH) - { - td_thrinfo_t ti; - - status = td_thr_get_info_p (msg.th_p, &ti); - if (status != TD_OK) - { - fprintf (stderr, - "error getting thread info on dying thread: %s\n", - thread_db_err_str (status)); - break; - } - - /* Note that this thread's death has been foretold. */ - death_state_got_event (ti.ti_tid); - } + these messages for our own sake. */ } /* Disable the event breakpoints while we step the thread across them. */ @@ -1941,9 +1783,6 @@ handle_thread_db_event (struct child_process *process) /* Re-insert the event breakpoints. */ insert_thread_db_event_breakpoints (serv); - /* Note that this thread has performed an event notification. */ - death_state_notified (thread); - /* If the wait status is a SIGTRAP signal, then that means the single-step proceeded normally. Otherwise, it's a new event we should deal with. */ @@ -1966,15 +1805,7 @@ continue_thread (struct gdbserv_thread *thread, int signal) thread_db_flush_regset_caches(); if (thread->ti.ti_lid != 0) - { - /* If this thread is now going to exit silently, just continue - it now and let it die. */ - if (thread->death_state == death_state_drop_when_continued - && signal == 0) - lwp_pool_continue_and_drop_lwp (thread->ti.ti_lid, 0); - else - lwp_pool_continue_lwp (thread->ti.ti_lid, signal); - } + lwp_pool_continue_lwp (thread->ti.ti_lid, signal); thread_db_invalidate_caches (); } |