2008-03-14 Pedro Alves * target.h (struct target_ops): Delete to_async_mask_value and add to_async_mask. (target_is_async_p, target_async): Formatting. (target_async_mask_value): Delete. (target_async_mask): Delete function declaration, and add new target macro with the same name. * target.c (update_current_target): Replace to_async_mask_value by to_async_mask. Default to_async_mask to return_one. (target_async_mask): Delete. (find_default_can_async_p, find_default_is_async_p): New. (init_dummy_target): register find_default_can_async_p and find_default_is_async_p on the dummy target. * linux-nat.c: Include inf-loop.h, event-loop.h and event-top.h. (debug_linux_nat_async): New global. (show_debug_linux_nat_async): New function. (linux_nat_async_permitted, linux_nat_async_permitted_1): New globals. (set_linux_nat_async_permitted, show_linux_nat_async_permitted): New functions. (linux_nat_async_enabled, linux_nat_async_mask_value) (linux_nat_event_pipe, linux_nat_num_queued_events) (linux_nat_async_events_enabled): New globals. (struct linux_queued_event): New struct. (cached_waitpid_queue): New global. (cached_waitpid, push_cached_waitpid, clear_cached_waitpid_queue): New. (my_waitpid): Call cached_waitpid. (linux_child_post_startup_inferior): Enable async mode. (linux_child_follow_fork): Re-enable async mode if needed. (sigchld_action, async_mask, async_old_mask, async_old_action): New globals. (lin_lwp_attach_lwp): No need to mess with signal blocking in async mode. (linux_nat_attach): Implement async attaching. (linux_nat_detach): Disable async mode if it is enabled. (linux_nat_resume): If in async mode, mask async events on entry. If short circuiting, force event loop wake up. If resuming, set target_executing, and register target events in the event loop. (pipe_to_local_event_queue): New. (linux_nat_wait): In async mode, get events from the cached_waitpid_queue. If only discarded events are available, return to event loop. 
If the inferior stopped with an interesting event, clear target_executing, and register target events in the event loop. (linux_nat_mourn_inferior): Disable async mode if there are no more forks available, otherwise leave it on. (sigchld_handler): If in async mode, call internal_error. (linux_nat_is_async_p, linux_nat_can_async_p) (linux_nat_async_mask): New. (linux_nat_event_pipe_pop, linux_nat_event_pipe_push): New. (async_sigchld_handler): New. (linux_nat_async_events): New. (async_terminal_is_ours): New global. (linux_nat_terminal_inferior, linux_nat_terminal_ours): New. (async_client_callback, async_client_context): New. (linux_nat_async_file_handler): New. (linux_nat_async, linux_nat_disable_async) (linux_nat_enable_async): New. (linux_nat_add_target): Register linux_nat_can_async_p, linux_nat_is_async_p, linux_nat_async, linux_nat_async_mask, linux_nat_terminal_inferior and linux_nat_terminal_ours. (_initialize_linux_nat): Remove local action variable, and update code that used it to use sigchld_action. Add new "lin-lwp-async" debug set/show command. Add new "linux-async" set/show command. Setup async_mask and async_action. * linux-thread-db.c (thread_db_can_async_p,thread_db_is_async_p) (thread_db_async, thread_db_async_mask): New. (clear_lwpid_callback): Handle TARGET_WAITKIND_IGNORE. (init_thread_db_ops): Register thread_db_can_async_p, thread_db_is_async_p, thread_db_async and thread_db_async_mask. * remote.c (remote_async_mask_value): New. (remote_return_zero): New. (init_remote_ops): Register remote_return_zero as callbacks of to_can_async_p and to_is_async_p. (remote_can_async_p, remote_is_async_p, remote_async): Update to use remote_async_mask_value. (remote_async_mask): New. (init_remote_async_ops): Remove to_async_mask_value setting and register remote_async_mask as to_async_mask callback in remote_async_ops. * Makefile.in (linux-nat.o): Update. 
--- gdb/Makefile.in | 2 gdb/linux-nat.c | 814 +++++++++++++++++++++++++++++++++++++++++++++++--- gdb/linux-thread-db.c | 32 + gdb/remote.c | 26 + gdb/target.c | 33 +- gdb/target.h | 15 6 files changed, 855 insertions(+), 67 deletions(-) Index: src/gdb/Makefile.in =================================================================== --- src.orig/gdb/Makefile.in 2008-03-14 05:51:40.000000000 +0000 +++ src/gdb/Makefile.in 2008-03-14 06:06:24.000000000 +0000 @@ -2355,7 +2355,7 @@ linux-nat.o: linux-nat.c $(defs_h) $(inf $(gdb_wait_h) $(gdb_assert_h) $(linux_nat_h) $(gdbthread_h) \ $(gdbcmd_h) $(regcache_h) $(regset_h) $(inf_ptrace_h) $(auxv_h) \ $(elf_bfd_h) $(gregset_h) $(gdbcore_h) $(gdbthread_h) $(gdb_stat_h) \ - $(linux_fork_h) + $(linux_fork_h) $(inf_loop_h) $(event_loop_h) $(event_top_h) linux-thread-db.o: linux-thread-db.c $(defs_h) $(gdb_assert_h) \ $(gdb_proc_service_h) $(gdb_thread_db_h) $(bfd_h) $(exceptions_h) \ $(gdbthread_h) $(inferior_h) $(symfile_h) $(objfiles_h) $(target_h) \ Index: src/gdb/linux-nat.c =================================================================== --- src.orig/gdb/linux-nat.c 2008-03-14 06:04:45.000000000 +0000 +++ src/gdb/linux-nat.c 2008-03-14 06:39:42.000000000 +0000 @@ -46,6 +46,10 @@ #include "gdbthread.h" /* for struct thread_info etc. */ #include "gdb_stat.h" /* for struct stat */ #include /* for O_RDONLY */ +#include "inf-loop.h" +#include "event-loop.h" +#include "event-top.h" +#include #ifndef O_LARGEFILE #define O_LARGEFILE 0 @@ -113,6 +117,44 @@ show_debug_linux_nat (struct ui_file *fi value); } +static int debug_linux_nat_async = 0; +static void +show_debug_linux_nat_async (struct ui_file *file, int from_tty, + struct cmd_list_element *c, const char *value) +{ + fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"), + value); +} + + +/* Controls if async mode is permitted. */ +static int linux_nat_async_permitted = 1; + +/* The set command writes to this variable. 
Only if the inferior is + executing linux_nat_async_permitted is *not* updated. */ +static int linux_nat_async_permitted_1 = 1; + +static void +set_linux_nat_async_permitted (char *args, int from_tty, + struct cmd_list_element *c) +{ + if (target_has_execution) + { + linux_nat_async_permitted_1 = linux_nat_async_permitted; + error (_("Cannot change this setting while the target is running.")); + } + linux_nat_async_permitted = linux_nat_async_permitted_1; +} + +static void +show_linux_nat_async_permitted (struct ui_file *file, int from_tty, + struct cmd_list_element *c, const char *value) +{ + fprintf_filtered (file, _("Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"), + value); +} + + static int linux_parent_pid; struct simple_pid_list @@ -133,6 +175,142 @@ static int linux_supports_tracefork_flag static int linux_supports_tracevforkdone_flag = -1; +/* Async mode support */ + +/* To listen to target events asynchronously, we install a SIGCHLD + handler whose duty is to call waitpid (-1, ..., WNOHANG) to get all + the pending events into a pipe. Whenever we're ready to handle + events asynchronously, this pipe is registered as the waitable file + handle in the event loop. When we get to entry target points + coming out of the common code (target_wait, target_resume), that + are going to call waitpid, we block SIGCHLD signals, and remove all + the events placed in the pipe into a local queue. All the + subsequent calls to my_waitpid (a waitpid wrapper) check this local + queue first. 
*/ + +static int linux_nat_async_enabled; +static int linux_nat_async_mask_value = 1; + +static int linux_nat_event_pipe[2] = { -1, -1 }; +static volatile int linux_nat_num_queued_events; +static volatile int linux_nat_async_events_enabled; + +static void linux_nat_async_events (int enable); +static void pipe_to_local_event_queue (void); +static void linux_nat_event_pipe_push (int pid, int status, int options); +static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options); +static void linux_nat_enable_async (void); +static void linux_nat_disable_async (void); + +struct linux_queued_event +{ + int pid; + int options; + int status; + struct linux_queued_event *next; +}; + +static struct linux_queued_event *cached_waitpid_queue = NULL; + +static int +cached_waitpid (int pid, int *status, int flags) +{ + struct linux_queued_event *msg = cached_waitpid_queue; + struct linux_queued_event *prev = NULL; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, + "CWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n", + linux_nat_async_events_enabled, linux_nat_num_queued_events); + + if (flags & __WALL) + { + for (; msg; prev = msg, msg = msg->next) + if (pid == -1 || pid == msg->pid) + break; + } + else if (flags & __WCLONE) + { + for (; msg; prev = msg, msg = msg->next) + if (msg->options & __WCLONE + && (pid == -1 || pid == msg->pid)) + break; + } + else + { + for (; msg; prev = msg, msg = msg->next) + if ((msg->options & __WCLONE) == 0 + && (pid == -1 || pid == msg->pid)) + break; + } + + if (msg) + { + int pid; + + if (prev) + prev->next = msg->next; + else + cached_waitpid_queue = msg->next; + + msg->next = NULL; + if (status) + *status = msg->status; + pid = msg->pid; + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "CWPID: pid(%d), status(%x)\n", + pid, msg->status); + xfree (msg); + + return pid; + } + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "CWPID: miss\n"); + + if (status) + 
*status = 0; + return -1; +} + +static void +push_cached_waitpid (int pid, int status, int options) +{ + struct linux_queued_event *event, *new_event; + + new_event = xmalloc (sizeof (*new_event)); + new_event->pid = pid; + new_event->status = status; + new_event->options = options; + new_event->next = NULL; + + if (cached_waitpid_queue) + { + for (event = cached_waitpid_queue; + event && event->next; + event = event->next) + ; + + event->next = new_event; + } + else + cached_waitpid_queue = new_event; +} + +static void +clear_cached_waitpid_queue (void) +{ + struct linux_queued_event *event = cached_waitpid_queue; + while (event) + { + struct linux_queued_event *next = event->next; + xfree (event); + event = next; + } + cached_waitpid_queue = NULL; +} + /* Trivial list manipulation functions to keep track of a list of new stopped processes. */ @@ -183,12 +361,18 @@ linux_tracefork_child (void) _exit (0); } -/* Wrapper function for waitpid which handles EINTR. */ +/* Wrapper function for waitpid which handles EINTR, and checks for + cached events. */ static int my_waitpid (int pid, int *status, int flags) { int ret; + + ret = cached_waitpid (pid, status, flags); + if (ret != -1) + return ret; + do { ret = waitpid (pid, status, flags); @@ -351,6 +535,12 @@ static void linux_child_post_startup_inferior (ptid_t ptid) { linux_enable_event_reporting (ptid); + + /* Checking for thread_db may throw a non-fatal error, so do this + before. */ + if (target_can_async_p ()) + linux_nat_enable_async (); + check_for_thread_db (); } @@ -361,6 +551,7 @@ linux_child_follow_fork (struct target_o struct target_waitstatus last_status; int has_vforked; int parent_pid, child_pid; + int async_was_enabled = linux_nat_async_enabled; get_last_target_status (&last_ptid, &last_status); has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED); @@ -516,6 +707,12 @@ linux_child_follow_fork (struct target_o target_detach (which does other necessary cleanup). 
*/ push_target (ops); + + if (async_was_enabled && !linux_nat_async_enabled) + /* target_detach may disable async depending on multi-threaded + enabled or not. Reenable it if needed. */ + linux_nat_enable_async (); + linux_nat_switch_fork (inferior_ptid); check_for_thread_db (); @@ -616,8 +813,15 @@ static sigset_t normal_mask; _initialize_linux_nat. */ static sigset_t suspend_mask; +/* SIGCHLD action for synchronous mode. */ +struct sigaction sigchld_action; + /* Signals to block to make that sigsuspend work. */ static sigset_t blocked_mask; + +/* Signal mask and handler for async mode. */ +static sigset_t async_mask, async_old_mask; +static struct sigaction async_action, async_old_action; /* Prototypes for local functions. */ @@ -877,12 +1081,15 @@ lin_lwp_attach_lwp (ptid_t ptid) gdb_assert (is_lwp (ptid)); - /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events - to interrupt either the ptrace() or waitpid() calls below. */ - if (!sigismember (&blocked_mask, SIGCHLD)) + if (!linux_nat_async_enabled) { - sigaddset (&blocked_mask, SIGCHLD); - sigprocmask (SIG_BLOCK, &blocked_mask, NULL); + /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events + to interrupt either the ptrace() or waitpid() calls below. */ + if (!sigismember (&blocked_mask, SIGCHLD)) + { + sigaddset (&blocked_mask, SIGCHLD); + sigprocmask (SIG_BLOCK, &blocked_mask, NULL); + } } lp = find_lwp_pid (ptid); @@ -966,6 +1173,17 @@ linux_nat_attach (char *args, int from_t pid_t pid; int status; int cloned = 0; + sigset_t mask, prev_mask; + + /* Make sure SIGCHLD is blocked. The sync mode SIGCHLD handler + accesses the current_target, which is pushed by + linux_ops->to_attach below. This stops the race. It also + prevents the async mode to lose any SIGCHLD events, as the async + handler is only registered at function exit. 
*/ + sigemptyset (&mask); + sigaddset (&mask, SIGCHLD); + sigprocmask (SIG_SETMASK, NULL, &prev_mask); + sigprocmask (SIG_BLOCK, &mask, NULL); /* FIXME: We should probably accept a list of process id's, and attach all of them. */ @@ -994,9 +1212,36 @@ linux_nat_attach (char *args, int from_t lp->stopped = 1; - /* Fake the SIGSTOP that core GDB expects. */ - lp->status = W_STOPCODE (SIGSTOP); - lp->resumed = 1; + if (target_can_async_p ()) + /* This needs to be before the above, so the current target is + already set when the user didn't specify an exec file, and the + async SIGCHLD handler doesn't mess with the waitpid calls + above. */ + linux_nat_enable_async (); + + if (target_can_async_p () && !sync_execution) + /* No need to fake the SIGSTOP and stop all threads in WFI, we're + going to resume right away. */ + ; + else + { + /* Fake the SIGSTOP that core GDB expects. */ + lp->status = W_STOPCODE (SIGSTOP); + lp->resumed = 1; + if (target_can_async_p ()) + /* Wake event loop with special token, to get to WFI. */ + linux_nat_event_pipe_push (-1, -1, -1); + + if (debug_linux_nat) + { + fprintf_unfiltered (gdb_stdlog, + "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid); + } + + /* Restore the original signal mask. */ + sigprocmask (SIG_SETMASK, NULL, &prev_mask); + } + if (debug_linux_nat) { fprintf_unfiltered (gdb_stdlog, @@ -1076,12 +1321,17 @@ linux_nat_detach (char *args, int from_t /* Destroy LWP info; it's no longer valid. */ init_lwp_list (); - /* Restore the original signal mask. */ - sigprocmask (SIG_SETMASK, &normal_mask, NULL); - sigemptyset (&blocked_mask); - inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid)); linux_ops->to_detach (args, from_tty); + + if (target_can_async_p () && linux_nat_async_enabled) + linux_nat_disable_async (); + else + { + /* Restore the original signal mask. */ + sigprocmask (SIG_SETMASK, &normal_mask, NULL); + sigemptyset (&blocked_mask); + } } /* Resume LP. 
*/ @@ -1135,6 +1385,13 @@ linux_nat_resume (ptid_t ptid, int step, prune_lwps (); + if (linux_nat_async_events_enabled) + { + /* Mask events while we're here. */ + linux_nat_async_events (0); + pipe_to_local_event_queue (); + } + /* A specific PTID means `step only this process id'. */ resume_all = (PIDGET (ptid) == -1); @@ -1199,6 +1456,13 @@ linux_nat_resume (ptid_t ptid, int step, "LLR: Short circuiting for status 0x%x\n", lp->status); + if (target_can_async_p () && linux_nat_async_enabled) + { + target_async (inferior_event_handler, 0); + + /* Wake event loop with special token, to get to WFI. */ + linux_nat_event_pipe_push (-1, -1, -1); + } return; } @@ -1212,12 +1476,17 @@ linux_nat_resume (ptid_t ptid, int step, linux_ops->to_resume (ptid, step, signo); memset (&lp->siginfo, 0, sizeof (lp->siginfo)); + target_executing = 1; + if (debug_linux_nat) fprintf_unfiltered (gdb_stdlog, "LLR: %s %s, %s (resume event thread)\n", step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT", target_pid_to_str (ptid), signo ? strsignal (signo) : "0"); + + if (target_can_async_p () && linux_nat_async_enabled) + target_async (inferior_event_handler, 0); } /* Issue kill to specified lwp. */ @@ -2069,6 +2338,27 @@ linux_nat_filter_event (int lwpid, int s return lp; } +static void +pipe_to_local_event_queue (void) +{ + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, + "PTLEQ: linux_nat_num_queued_events(%d)\n", + linux_nat_num_queued_events); + while (linux_nat_num_queued_events) + { + int lwpid, status, options; + + lwpid = linux_nat_event_pipe_pop (&status, &options); + if (lwpid == -1 && status == -1 && options == -1) + /* Special wake up event loop token. 
*/ + continue; + + gdb_assert (lwpid > 0); + push_cached_waitpid (lwpid, status, options); + } +} + static ptid_t linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus) { @@ -2077,6 +2367,15 @@ linux_nat_wait (ptid_t ptid, struct targ int status = 0; pid_t pid = PIDGET (ptid); sigset_t flush_mask; + struct sigaction entry_action; + + /* Handle switching the SIGCHLD handler between async/sync modes. + Sync mode is currently used even when async is enabled, to + implement inferior function calls. */ + sigaction (SIGCHLD, NULL, &entry_action); + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: enter\n"); /* The first time we get here after starting a new inferior, we may not have added it to the LWP list yet - this is the earliest @@ -2093,11 +2392,29 @@ linux_nat_wait (ptid_t ptid, struct targ sigemptyset (&flush_mask); - /* Make sure SIGCHLD is blocked. */ - if (!sigismember (&blocked_mask, SIGCHLD)) + if (linux_nat_async_events_enabled) { - sigaddset (&blocked_mask, SIGCHLD); - sigprocmask (SIG_BLOCK, &blocked_mask, NULL); + /* Block events while we're here. They'll be enabled on linux_nat_resume. */ + linux_nat_async_events (0); + /* Get events out of queue, and make them available to + cached_waitpid / my_waitpid. */ + pipe_to_local_event_queue (); + } + else + { + /* Make sure SIGCHLD is blocked. */ + if (!sigismember (&blocked_mask, SIGCHLD)) + { + sigaddset (&blocked_mask, SIGCHLD); + sigprocmask (SIG_BLOCK, &blocked_mask, NULL); + } + + /* We're in sync mode. Make sure SIGCHLD isn't handled by + async_sigchld_handler, which would call waitpid and put the + results in the event pipe. When we wake up from sigsuspend + below, we want to call waitpid ourselves. It is just easier + and more efficient to toggle the handler. 
*/ + sigaction (SIGCHLD, &sigchld_action, NULL); } retry: @@ -2122,7 +2439,7 @@ retry: target_pid_to_str (lp->ptid)); } - /* But if we don't fine one, we'll have to wait, and check both + /* But if we don't find one, we'll have to wait, and check both cloned and uncloned processes. We start with the cloned processes. */ options = __WCLONE | WNOHANG; @@ -2189,7 +2506,13 @@ retry: { pid_t lwpid; - lwpid = my_waitpid (pid, &status, options); + if (target_can_async_p () && linux_nat_async_enabled) + /* In async mode, don't ever block. Only look at the locally + queued events. */ + lwpid = cached_waitpid (pid, &status, options); + else + lwpid = my_waitpid (pid, &status, options); + if (lwpid > 0) { gdb_assert (pid == -1 || lwpid == pid); @@ -2217,9 +2540,27 @@ retry: /* Alternate between checking cloned and uncloned processes. */ options ^= __WCLONE; - /* And suspend every time we have checked both. */ + /* And every time we have checked both: + In async mode, return to event loop; + In sync mode, suspend waiting for a SIGCHLD signal. */ if (options & __WCLONE) - sigsuspend (&suspend_mask); + { + if (target_can_async_p () && linux_nat_async_enabled) + { + /* No interesting event. */ + ourstatus->kind = TARGET_WAITKIND_IGNORE; + + /* Get ready for the next event. */ + target_async (inferior_event_handler, 0); + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n"); + + return minus_one_ptid; + } + else + sigsuspend (&suspend_mask); + } } /* We shouldn't end up here unless we want to try again. */ @@ -2324,6 +2665,17 @@ retry: else store_waitstatus (ourstatus, status); + target_executing = 0; + + sigaction (SIGCHLD, &entry_action, NULL); + + /* Get ready for the next event. 
*/ + if (target_can_async_p () && linux_nat_async_enabled) + target_async (inferior_event_handler, 0); + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "LLW: exit\n"); + return lp->ptid; } @@ -2438,18 +2790,27 @@ linux_nat_mourn_inferior (void) /* Destroy LWP info; it's no longer valid. */ init_lwp_list (); - /* Restore the original signal mask. */ - sigprocmask (SIG_SETMASK, &normal_mask, NULL); - sigemptyset (&blocked_mask); - if (! forks_exist_p ()) - /* Normal case, no other forks available. */ - linux_ops->to_mourn_inferior (); + { + if (target_can_async_p () && linux_nat_async_enabled) + linux_nat_disable_async (); + else + { + /* Restore the original signal mask. */ + sigprocmask (SIG_SETMASK, &normal_mask, NULL); + sigemptyset (&blocked_mask); + } + + /* Normal case, no other forks available. */ + linux_ops->to_mourn_inferior (); + } else - /* Multi-fork case. The current inferior_ptid has exited, but - there are other viable forks to debug. Delete the exiting - one and context-switch to the first available. */ - linux_fork_mourn_inferior (); + { + /* Multi-fork case. The current inferior_ptid has exited, but + there are other viable forks to debug. Delete the exiting + one and context-switch to the first available. */ + linux_fork_mourn_inferior (); + } } static LONGEST @@ -2512,6 +2873,11 @@ linux_nat_pid_to_str (ptid_t ptid) static void sigchld_handler (int signo) { + if (target_can_async_p () && linux_nat_async_enabled && signo == SIGCHLD) + /* It is *always* a bug to hit this. */ + internal_error (__FILE__, __LINE__, + "sigchld_handler called when async is enabled"); + /* Do nothing. The only reason for this handler is that it allows us to use sigsuspend in linux_nat_wait above to wait for the arrival of a SIGCHLD. */ @@ -3270,6 +3636,331 @@ linux_trad_target (CORE_ADDR (*register_ return t; } +/* target_is_async_p implementation. 
*/ + +static int +linux_nat_is_async_p (void) +{ + /* NOTE: palves 2008-03-11: We're only async when the user requests + it explicitly with the "set linux-async" command. Someday, linux + will always be async. */ + if (!linux_nat_async_permitted) + return 0; + + return 1; +} + +/* target_can_async_p implementation. */ + +static int +linux_nat_can_async_p (void) +{ + /* NOTE: palves 2008-03-11: We're only async when the user requests + it explicitly with the "set linux-async" command. Someday, linux + will always be async. */ + if (!linux_nat_async_permitted) + return 0; + + /* See target.h/target_async_mask. */ + return linux_nat_async_mask_value; +} + +/* target_async_mask implementation. */ + +static int +linux_nat_async_mask (int mask) +{ + int current_state = linux_nat_async_mask_value; + linux_nat_async_mask_value = mask; + return current_state; +} + +/* Pop an event from the event pipe. */ + +static int +linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options) +{ + struct linux_queued_event event = {0}; + int ret; + + do + { + ret = read (linux_nat_event_pipe[0], &event, sizeof (event)); + } + while (ret == -1 && errno == EINTR); + + gdb_assert (ret == sizeof (event)); + + *ptr_status = event.status; + *ptr_options = event.options; + + linux_nat_num_queued_events--; + + return event.pid; +} + +/* Push an event into the event pipe. */ + +static void +linux_nat_event_pipe_push (int pid, int status, int options) +{ + int ret; + struct linux_queued_event event = {0}; + event.pid = pid; + event.status = status; + event.options = options; + + do + { + ret = write (linux_nat_event_pipe[1], &event, sizeof (event)); + gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event)); + } while (ret == -1 && errno == EINTR); + + linux_nat_num_queued_events++; +} + +/* SIGCHLD handler for async mode. 
*/ + +static void +async_sigchld_handler (int signo) +{ + int status, options, pid; + + if (!linux_nat_async_enabled || !linux_nat_async_events_enabled) + internal_error (__FILE__, __LINE__, + "async_sigchld_handler called with async masked"); + + while (1) + { + status = 0; + options = __WCLONE | WNOHANG; + + do + { + pid = waitpid (-1, &status, options); + } + while (pid == -1 && errno == EINTR); + + if (pid <= 0) + { + options = WNOHANG; + do + { + pid = waitpid (-1, &status, options); + } + while (pid == -1 && errno == EINTR); + } + + if (pid <= 0) + /* No more children reporting events. */ + break; + + /* If status == 0, the lwp is exiting. */ + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, "\ +async_sigchld_handler: pid(%d), status(%x), options (%x)\n", + pid, status, options); + + linux_nat_event_pipe_push (pid, status, options); + } + + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, + "async_sigchld_handler: linux_nat_num_queued_events(%d)\n", + linux_nat_num_queued_events); +} + +/* Enable or disable async SIGCHLD handling. */ + +static void +linux_nat_async_events (int enable) +{ + if (debug_linux_nat_async) + fprintf_unfiltered (gdb_stdlog, + "LNAE: enable(%d): linux_nat_async_events_enabled: %d, linux_nat_num_queued_events: %d\n", + enable, linux_nat_async_events_enabled, linux_nat_num_queued_events); + + if (enable) + { + gdb_assert (!linux_nat_async_events_enabled); + + /* Unblock target events. */ + linux_nat_async_events_enabled = 1; + sigprocmask (SIG_SETMASK, &async_mask, &async_old_mask); + } + else + { + gdb_assert (linux_nat_async_events_enabled); + + /* Block target events. */ + sigprocmask (SIG_SETMASK, &async_old_mask, NULL); + linux_nat_async_events_enabled = 0; + } +} + +static int async_terminal_is_ours = 1; + +/* target_terminal_inferior implementation. */ + +static void +linux_nat_terminal_inferior (void) +{ + if (!target_is_async_p ()) + { + /* Async mode is disabled. 
*/ + terminal_inferior (); + return; + } + + /* GDB should never give the terminal to the inferior, if the + inferior is running in the background (run&, continue&, etc.). + This check can be removed when the common code is fixed. */ + if (!sync_execution) + return; + + terminal_inferior (); + + if (!async_terminal_is_ours) + return; + + delete_file_handler (input_fd); + async_terminal_is_ours = 0; +} + +/* target_terminal_ours implementation. */ + +void +linux_nat_terminal_ours (void) +{ + if (!target_is_async_p ()) + { + /* Async mode is disabled. */ + terminal_ours (); + return; + } + + /* GDB should never give the terminal to the inferior if the + inferior is running in the background (run&, continue&, etc.), + but claiming it sure should. */ + terminal_ours (); + + if (!sync_execution) + return; + + if (async_terminal_is_ours) + return; + + add_file_handler (input_fd, stdin_event_handler, 0); + async_terminal_is_ours = 1; +} + +static void (*async_client_callback) (enum inferior_event_type event_type, + void *context); +static void *async_client_context; + +static void +linux_nat_async_file_handler (int error, gdb_client_data client_data) +{ + async_client_callback (INF_REG_EVENT, async_client_context); +} + +/* target_async implementation. */ + +static void +linux_nat_async (void (*callback) (enum inferior_event_type event_type, + void *context), void *context) +{ + if (! 
(target_can_async_p () && linux_nat_async_enabled)) + internal_error (__FILE__, __LINE__, + "Calling target_async when async is masked"); + + if (callback != NULL) + { + async_client_callback = callback; + async_client_context = context; + add_file_handler (linux_nat_event_pipe[0], + linux_nat_async_file_handler, NULL); + + if (!linux_nat_async_events_enabled) + linux_nat_async_events (1); + } + else + { + async_client_callback = callback; + async_client_context = context; + + if (linux_nat_async_events_enabled) + linux_nat_async_events (0); + delete_file_handler (linux_nat_event_pipe[0]); + } + return; +} + +/* Disable async mode. */ + +static void +linux_nat_disable_async (void) +{ + /* Unregister from event loop. Don't go through the target stack + (target_async), as linux-nat may have been popped off already. */ + linux_nat_async (NULL, 0); + + linux_nat_async_enabled = 0; + + sigaction (SIGCHLD, &async_old_action, NULL); + + /* Restore the original signal mask. */ + sigprocmask (SIG_SETMASK, &normal_mask, NULL); + sigemptyset (&blocked_mask); + + clear_cached_waitpid_queue (); + linux_nat_async_enabled = 0; + linux_nat_num_queued_events = 0; + + close (linux_nat_event_pipe[0]); + close (linux_nat_event_pipe[1]); + linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1; +} + +/* Enable async mode. */ + +static void +linux_nat_enable_async (void) +{ + sigset_t mask; + + gdb_assert (linux_nat_async_enabled == 0); + gdb_assert (cached_waitpid_queue == NULL); + + /* Order matters. Disable SIGCHLD first so we don't hit the + assertion in sigchld. */ + + /* Make sure SIGCHLD is only delivered when we want. 
*/ + sigprocmask (SIG_SETMASK, NULL, &mask); + if (!sigismember (&mask, SIGCHLD)) + { + sigaddset (&mask, SIGCHLD); + sigprocmask (SIG_BLOCK, &mask, NULL); + } + sigaction (SIGCHLD, &async_action, &async_old_action); + + if (pipe (linux_nat_event_pipe) == -1) + internal_error (__FILE__, __LINE__, + "creating event pipe failed."); + + fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK); + fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK); + + linux_nat_async_enabled = 1; + + /* Register in the event loop. Don't go through the target stack + (target_async), as we're not sure linux-nat is pushed on the + stack yet. We may be called as a result of a + find_default_run_target. */ + linux_nat_async (inferior_event_handler, 0); +} + void linux_nat_add_target (struct target_ops *t) { @@ -3292,6 +3983,13 @@ linux_nat_add_target (struct target_ops t->to_pid_to_str = linux_nat_pid_to_str; t->to_has_thread_control = tc_schedlock; + t->to_can_async_p = linux_nat_can_async_p; + t->to_is_async_p = linux_nat_is_async_p; + t->to_async = linux_nat_async; + t->to_async_mask = linux_nat_async_mask; + t->to_terminal_inferior = linux_nat_terminal_inferior; + t->to_terminal_ours = linux_nat_terminal_ours; + /* We don't change the stratum; this target will sit at process_stratum and thread_db will set at thread_stratum. 
This is a little strange, since this is a multi-threaded-capable @@ -3329,8 +4027,6 @@ linux_nat_get_siginfo (ptid_t ptid) void _initialize_linux_nat (void) { - struct sigaction action; - add_info ("proc", linux_nat_info_proc_cmd, _("\ Show /proc process information about any running process.\n\ Specify any process id, or use the program being debugged by default.\n\ @@ -3340,13 +4036,39 @@ Specify any of the following keywords fo status -- list a different bunch of random process info.\n\ all -- list all available /proc info.")); + add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\ +Set debugging of GNU/Linux lwp module."), _("\ +Show debugging of GNU/Linux lwp module."), _("\ +Enables printf debugging output."), + NULL, + show_debug_linux_nat, + &setdebuglist, &showdebuglist); + + add_setshow_zinteger_cmd ("lin-lwp-async", no_class, + &debug_linux_nat_async, _("\ +Set debugging of GNU/Linux async lwp module."), _("\ +Show debugging of GNU/Linux async lwp module."), _("\ +Enables printf debugging output."), + NULL, + show_debug_linux_nat_async, + &setdebuglist, &showdebuglist); + + add_setshow_boolean_cmd ("linux-async", class_obscure, + &linux_nat_async_permitted_1, _("\ +Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\ +Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\ +Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."), + set_linux_nat_async_permitted, + show_linux_nat_async_permitted, + &setlist, &showlist); + /* Save the original signal mask. */ sigprocmask (SIG_SETMASK, NULL, &normal_mask); - action.sa_handler = sigchld_handler; - sigemptyset (&action.sa_mask); - action.sa_flags = SA_RESTART; - sigaction (SIGCHLD, &action, NULL); + sigchld_action.sa_handler = sigchld_handler; + sigemptyset (&sigchld_action.sa_mask); + sigchld_action.sa_flags = SA_RESTART; + sigaction (SIGCHLD, &sigchld_action, NULL); /* Make sure we don't block SIGCHLD during a sigsuspend. 
*/ sigprocmask (SIG_SETMASK, NULL, &suspend_mask); @@ -3354,13 +4076,17 @@ Specify any of the following keywords fo sigemptyset (&blocked_mask); - add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\ -Set debugging of GNU/Linux lwp module."), _("\ -Show debugging of GNU/Linux lwp module."), _("\ -Enables printf debugging output."), - NULL, - show_debug_linux_nat, - &setdebuglist, &showdebuglist); + /* Set up signal handling for asynchronous mode. */ + + /* Make sure we don't block SIGCHLD during a poll/select. */ + sigprocmask (SIG_SETMASK, NULL, &async_mask); + sigdelset (&async_mask, SIGCHLD); + + /* SIGCHLD handler for async mode. */ + async_action.sa_handler = async_sigchld_handler; + sigemptyset (&async_action.sa_mask); + async_action.sa_flags = SA_RESTART; + sigaddset (&async_action.sa_mask, SIGCHLD); } Index: src/gdb/linux-thread-db.c =================================================================== --- src.orig/gdb/linux-thread-db.c 2008-03-14 05:51:31.000000000 +0000 +++ src/gdb/linux-thread-db.c 2008-03-14 06:06:24.000000000 +0000 @@ -754,6 +754,31 @@ thread_db_detach (char *args, int from_t } static int +thread_db_can_async_p (void) +{ + return target_beneath->to_can_async_p (); +} + +static int +thread_db_is_async_p (void) +{ + return target_beneath->to_is_async_p (); +} + +static void +thread_db_async (void (*callback) (enum inferior_event_type event_type, + void *context), void *context) +{ + return target_beneath->to_async (callback, context); +} + +static int +thread_db_async_mask (int mask) +{ + return target_beneath->to_async_mask (mask); +} + +static int clear_lwpid_callback (struct thread_info *thread, void *dummy) { /* If we know that our thread implementation is 1-to-1, we could save @@ -871,6 +896,9 @@ thread_db_wait (ptid_t ptid, struct targ ptid = target_beneath->to_wait (ptid, ourstatus); + if (ourstatus->kind == TARGET_WAITKIND_IGNORE) + return ptid; + if (ourstatus->kind == TARGET_WAITKIND_EXITED || ourstatus->kind == 
TARGET_WAITKIND_SIGNALLED) return pid_to_ptid (-1); @@ -1134,6 +1162,10 @@ init_thread_db_ops (void) thread_db_ops.to_get_thread_local_address = thread_db_get_thread_local_address; thread_db_ops.to_extra_thread_info = thread_db_extra_thread_info; + thread_db_ops.to_can_async_p = thread_db_can_async_p; + thread_db_ops.to_is_async_p = thread_db_is_async_p; + thread_db_ops.to_async = thread_db_async; + thread_db_ops.to_async_mask = thread_db_async_mask; thread_db_ops.to_magic = OPS_MAGIC; } Index: src/gdb/remote.c =================================================================== --- src.orig/gdb/remote.c 2008-03-14 05:51:31.000000000 +0000 +++ src/gdb/remote.c 2008-03-14 06:06:24.000000000 +0000 @@ -481,6 +481,8 @@ static struct target_ops extended_remote extended_remote_ops, but with asynchronous support. */ static struct target_ops remote_async_ops; +static int remote_async_mask_value = 1; + static struct target_ops extended_async_remote_ops; /* FIXME: cagney/1999-09-23: Even though getpkt was called with @@ -7211,6 +7213,12 @@ remote_command (char *args, int from_tty help_list (remote_cmdlist, "remote ", -1, gdb_stdout); } +static int +remote_return_zero (void) +{ + return 0; +} + static void init_remote_ops (void) { @@ -7264,6 +7272,8 @@ Specify the serial device it is connecte remote_ops.to_flash_erase = remote_flash_erase; remote_ops.to_flash_done = remote_flash_done; remote_ops.to_read_description = remote_read_description; + remote_ops.to_can_async_p = remote_return_zero; + remote_ops.to_is_async_p = remote_return_zero; } /* Set up the extended remote vector by making a copy of the standard @@ -7291,14 +7301,14 @@ static int remote_can_async_p (void) { /* We're async whenever the serial device is. */ - return (current_target.to_async_mask_value) && serial_can_async_p (remote_desc); + return remote_async_mask_value && serial_can_async_p (remote_desc); } static int remote_is_async_p (void) { /* We're async whenever the serial device is. 
*/ - return (current_target.to_async_mask_value) && serial_is_async_p (remote_desc); + return remote_async_mask_value && serial_is_async_p (remote_desc); } /* Pass the SERIAL event on and up to the client. One day this code @@ -7322,7 +7332,7 @@ static void remote_async (void (*callback) (enum inferior_event_type event_type, void *context), void *context) { - if (current_target.to_async_mask_value == 0) + if (remote_async_mask_value == 0) internal_error (__FILE__, __LINE__, _("Calling remote_async when async is masked")); @@ -7336,6 +7346,14 @@ remote_async (void (*callback) (enum inf serial_async (remote_desc, NULL, NULL); } +static int +remote_async_mask (int new_mask) +{ + int curr_mask = remote_async_mask_value; + remote_async_mask_value = new_mask; + return curr_mask; +} + /* Target async and target extended-async. This are temporary targets, until it is all tested. Eventually @@ -7395,7 +7413,7 @@ Specify the serial device it is connecte remote_async_ops.to_can_async_p = remote_can_async_p; remote_async_ops.to_is_async_p = remote_is_async_p; remote_async_ops.to_async = remote_async; - remote_async_ops.to_async_mask_value = 1; + remote_async_ops.to_async_mask = remote_async_mask; remote_async_ops.to_magic = OPS_MAGIC; remote_async_ops.to_memory_map = remote_memory_map; remote_async_ops.to_flash_erase = remote_flash_erase; Index: src/gdb/target.c =================================================================== --- src.orig/gdb/target.c 2008-03-14 05:51:31.000000000 +0000 +++ src/gdb/target.c 2008-03-14 06:11:07.000000000 +0000 @@ -472,7 +472,7 @@ update_current_target (void) INHERIT (to_can_async_p, t); INHERIT (to_is_async_p, t); INHERIT (to_async, t); - INHERIT (to_async_mask_value, t); + INHERIT (to_async_mask, t); INHERIT (to_find_memory_regions, t); INHERIT (to_make_corefile_notes, t); INHERIT (to_get_thread_local_address, t); @@ -648,6 +648,9 @@ update_current_target (void) de_fault (to_async, (void (*) (void (*) (enum inferior_event_type, void*), 
void*)) tcomplain); + de_fault (to_async_mask, + (int (*) (int)) + return_one); current_target.to_read_description = NULL; #undef de_fault @@ -1713,14 +1716,6 @@ target_disconnect (char *args, int from_ tcomplain (); } -int -target_async_mask (int mask) -{ - int saved_async_masked_status = target_async_mask_value; - target_async_mask_value = mask; - return saved_async_masked_status; -} - /* Look through the list of possible targets for a target that can follow forks. */ @@ -1819,6 +1814,24 @@ find_default_create_inferior (char *exec return; } +int +find_default_can_async_p (void) +{ + struct target_ops *t; + + t = find_default_run_target ("async"); + return (t->to_can_async_p) (); +} + +int +find_default_is_async_p (void) +{ + struct target_ops *t; + + t = find_default_run_target ("async"); + return (t->to_is_async_p) (); +} + static int default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len) { @@ -2083,6 +2096,8 @@ init_dummy_target (void) dummy_target.to_doc = ""; dummy_target.to_attach = find_default_attach; dummy_target.to_create_inferior = find_default_create_inferior; + dummy_target.to_can_async_p = find_default_can_async_p; + dummy_target.to_is_async_p = find_default_is_async_p; dummy_target.to_pid_to_str = normal_pid_to_str; dummy_target.to_stratum = dummy_stratum; dummy_target.to_find_memory_regions = dummy_find_memory_regions; Index: src/gdb/target.h =================================================================== --- src.orig/gdb/target.h 2008-03-14 05:51:31.000000000 +0000 +++ src/gdb/target.h 2008-03-14 06:06:24.000000000 +0000 @@ -415,9 +415,8 @@ struct target_ops /* ASYNC target controls */ int (*to_can_async_p) (void); int (*to_is_async_p) (void); - void (*to_async) (void (*cb) (enum inferior_event_type, void *context), - void *context); - int to_async_mask_value; + void (*to_async) (void (*) (enum inferior_event_type, void *), void *); + int (*to_async_mask) (int); int (*to_find_memory_regions) (int (*) (CORE_ADDR, unsigned long, int, int, 
int, @@ -946,11 +945,11 @@ int target_follow_fork (int follow_child #define target_can_async_p() (current_target.to_can_async_p ()) /* Is the target in asynchronous execution mode? */ -#define target_is_async_p() (current_target.to_is_async_p()) +#define target_is_async_p() (current_target.to_is_async_p ()) /* Put the target in async mode with the specified callback function. */ #define target_async(CALLBACK,CONTEXT) \ - (current_target.to_async((CALLBACK), (CONTEXT))) + (current_target.to_async ((CALLBACK), (CONTEXT))) /* This is to be used ONLY within call_function_by_hand(). It provides a workaround, to have inferior function calls done in sychronous @@ -966,10 +965,8 @@ int target_follow_fork (int follow_child the turning async on and off to the single execution commands, from where it is done currently, in remote_resume(). */ -#define target_async_mask_value \ - (current_target.to_async_mask_value) - -extern int target_async_mask (int mask); +#define target_async_mask(MASK) \ + (current_target.to_async_mask (MASK)) /* Converts a process id to a string. Usually, the string just contains `process xyz', but on some systems it may contain