Mirror of the gdb-patches mailing list
 help / color / mirror / Atom feed
From: ali_anwar <ali_anwar@codesourcery.com>
To: Pedro Alves <palves@redhat.com>
Cc: Tom Tromey <tromey@redhat.com>, <gdb-patches@sourceware.org>
Subject: Re: Updated patch for Bug 13217 - thread apply all detach throws a SEGFAULT
Date: Wed, 10 Jul 2013 10:31:00 -0000	[thread overview]
Message-ID: <51DD37FA.8010306@codesourcery.com> (raw)
In-Reply-To: <50C7628F.5080004@redhat.com>

[-- Attachment #1: Type: text/plain, Size: 2192 bytes --]

On 12/11/2012 09:42 PM, Pedro Alves wrote:
> On 12/11/2012 03:36 PM, ali_anwar wrote:
>> +  if (thread_count ())
>> +    {
>> +      struct thread_info *tp_array;
>> +      struct thread_info *tp;
>> +      int i, k;
>> +
>> +      /* Save a copy of the thread_list in case we execute detach
>> +         command.  */
>> +      tp_array =  xmalloc (sizeof (struct thread_info) * thread_count ());
>
> No need to compute the thread count twice, you can cache it.  No need to
> copy the whole thread structure.  Make this an array of a thread
> pointers, and then,
>
>> +      for (i = 0, tp = thread_list; tp; i++, tp = tp->next)
>> +        tp_array[i] = *tp;
>
>        ALL_THREADS (tp)
>          {
>            tp_array[i] = tp;
>            tp->refcount++;
>          }
>
> This increments the refcount of each current thread, so that attempts to
> delete it just mark it as deleted (so the C object remains valid).
>
>> +
>> +      for (k = 0; k != i; k++)
>> +        if (thread_alive (&tp_array[k]))
>
> and then write:
>
>        for (k = 0; k != i; k++)
>          {
>            if (thread_alive (tp_array[k]))
>              {
>                switch_to_thread (tp_array[k]->ptid);
>
>                printf_filtered (_("\nThread %d (%s):\n"),
>                                  tp_array[k]->num, target_pid_to_str (inferior_ptid));
>                execute_command (cmd, from_tty);
>                strcpy (cmd, saved_cmd);        /* Restore exact command used
>                                                   previously.  */
>              }
>          }
>
> And put this in a cleanup:
>
>        for (k = 0; k != i; k++)
>          tp_array[k]->refcount--;
>
> So that if the command throws an error, we still leave with the correct
> refcounts.
>
> The advantages are:
>
>   - less memory necessary for the array.
>   - handles the corner case of the target reusing a ptid (see
>     add_thread_silent).  IOW, this way, even if the command happens to
>     make the target reuse a ptid, "thread apply all" won't run the command
>     on those new threads by mistake.
>
>

I have tried to implement what you suggested in the attached patch. Does 
it look reasonable?

Thanks,
-Ali

[-- Attachment #2: Thread_apply_all.patch --]
[-- Type: text/x-patch, Size: 3787 bytes --]

Index: gdb/thread.c
===================================================================
RCS file: /cvs/src/src/gdb/thread.c,v
retrieving revision 1.153
diff -u -r1.153 thread.c
--- gdb/thread.c	11 Mar 2013 08:17:08 -0000	1.153
+++ gdb/thread.c	10 Jul 2013 09:51:08 -0000
@@ -65,6 +65,12 @@
 static void restore_current_thread (ptid_t);
 static void prune_threads (void);
 
+struct thread_array_cleanup {
+  struct thread_info **tp_array;
+  int count;
+};
+
+
 struct thread_info*
 inferior_thread (void)
 {
@@ -1125,6 +1131,15 @@
   xfree (old);
 }
 
+static void
+make_cleanup_thread_refcount (void *data)
+{
+  int k;
+  struct thread_array_cleanup *ta_cleanup = data;
+  for (k = 0; k != ta_cleanup->count; k++)
+    ta_cleanup->tp_array[k]->refcount--;
+}
+
 struct cleanup *
 make_cleanup_restore_current_thread (void)
 {
@@ -1176,13 +1191,13 @@
    thread apply 1 2 7 4 backtrace       Apply backtrace cmd to threads 1,2,7,4
    thread apply 2-7 9 p foo(1)  Apply p foo(1) cmd to threads 2->7 & 9
    thread apply all p x/i $pc   Apply x/i $pc cmd to all threads.  */
-
 static void
 thread_apply_all_command (char *cmd, int from_tty)
 {
-  struct thread_info *tp;
   struct cleanup *old_chain;
   char *saved_cmd;
+  int tc;
+  struct thread_array_cleanup ta_cleanup;
 
   if (cmd == NULL || *cmd == '\000')
     error (_("Please specify a command following the thread ID list"));
@@ -1195,17 +1210,41 @@
      execute_command.  */
   saved_cmd = xstrdup (cmd);
   make_cleanup (xfree, saved_cmd);
-  for (tp = thread_list; tp; tp = tp->next)
-    if (thread_alive (tp))
-      {
-	switch_to_thread (tp->ptid);
+  tc = thread_count ();
 
-	printf_filtered (_("\nThread %d (%s):\n"),
-			 tp->num, target_pid_to_str (inferior_ptid));
-	execute_command (cmd, from_tty);
-	strcpy (cmd, saved_cmd);	/* Restore exact command used
-					   previously.  */
-      }
+  if (tc)
+    {
+      struct thread_info **tp_array;
+      struct thread_info *tp;
+      int i, k;
+      i = 0;
+
+      /* Save a copy of the thread_list in case we execute detach
+         command.  */
+      tp_array =  xmalloc (sizeof (struct thread_info*) * tc);
+      ta_cleanup.tp_array = tp_array;
+      ta_cleanup.count = tc;
+
+      ALL_THREADS (tp)
+        {
+          tp_array[i] = tp;
+          tp->refcount++;
+          i++;
+        }
+      for (k = 0; k != i; k++)
+        if (thread_alive (tp_array[k]))
+          {
+            switch_to_thread (tp_array[k]->ptid);
+            printf_filtered (_("\nThread %d (%s):\n"),
+                             tp_array[k]->num, target_pid_to_str (inferior_ptid));
+            execute_command (cmd, from_tty);
+            strcpy (cmd, saved_cmd);        /* Restore exact command used
+                                               previously.  */
+           }
+
+      make_cleanup (xfree, tp_array);
+      make_cleanup (make_cleanup_thread_refcount, &ta_cleanup);
+    }
 
   do_cleanups (old_chain);
 }
Index: gdb/testsuite/gdb.threads/threadapply.exp
===================================================================
RCS file: /cvs/src/src/gdb/testsuite/gdb.threads/threadapply.exp,v
retrieving revision 1.16
diff -u -r1.16 threadapply.exp
--- gdb/testsuite/gdb.threads/threadapply.exp	1 Jan 2013 06:41:27 -0000	1.16
+++ gdb/testsuite/gdb.threads/threadapply.exp	10 Jul 2013 09:51:08 -0000
@@ -63,3 +63,4 @@
 gdb_test "up" ".*in main.*" "go up in the stack frame" 
 gdb_test "thread apply all print 1"  "Thread ..*\\\$\[0-9]+ = 1.*Thread ..*\\\$\[0-9]+ = 1.*Thread ..*\\\$\[0-9]+ = 1.*Thread ..*\\\$\[0-9]+ = 1.*Thread ..*\\\$\[0-9]+ = 1.*Thread ..*\\\$\[0-9]+ = 1" "run a simple print command on all threads"
 gdb_test "down" "#0.*thread_function.*" "go down and check selected frame"
+gdb_test "thread apply all detach" "Thread .*"


  reply	other threads:[~2013-07-10 10:31 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2012-09-26 11:46 ali_anwar
2012-09-26 11:50 ` ali_anwar
2012-09-27 15:27 ` Jan Kratochvil
2012-09-27 15:32 ` Tom Tromey
2012-12-10 18:37   ` ali_anwar
2012-12-10 20:20     ` Tom Tromey
2012-12-11 15:37       ` ali_anwar
2012-12-11 16:43         ` Pedro Alves
2013-07-10 10:31           ` ali_anwar [this message]
2013-07-10 12:57             ` Joel Brobecker
2013-07-10 17:55             ` Tom Tromey
2013-07-11 12:30               ` ali_anwar
2013-07-13 21:13                 ` Tom Tromey
2013-07-15 11:20                   ` ali_anwar

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=51DD37FA.8010306@codesourcery.com \
    --to=ali_anwar@codesourcery.com \
    --cc=gdb-patches@sourceware.org \
    --cc=palves@redhat.com \
    --cc=tromey@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox