Mirror of the gdb-patches mailing list
 help / color / mirror / Atom feed
From: Randolph Chung <randolph@tausq.org>
To: Andrew Cagney <cagney@gnu.org>
Cc: gdb-patches@sources.redhat.com
Subject: Re: [patch/rfa] Some fixes for hppa fallback unwinder
Date: Wed, 26 May 2004 04:02:00 -0000	[thread overview]
Message-ID: <20040526040215.GS7207@tausq.org> (raw)
In-Reply-To: <40B3B789.2030203@gnu.org>

> >i guess that works too. should i move it there?
> 
> Yes, feel free to commit that.

thanks. committed.

2004-05-25  Randolph Chung  <tausq@debian.org>

	* hppa-tdep.c (hppa_fallback_frame_cache): Handle stack adjustment,
	and multiple stack unwinds.
	(hppa_stub_unwind_sniffer): Handle unwind from pc == 0.

Index: hppa-tdep.c
===================================================================
RCS file: /cvs/src/src/gdb/hppa-tdep.c,v
retrieving revision 1.164
diff -u -p -r1.164 hppa-tdep.c
--- hppa-tdep.c	25 May 2004 03:55:23 -0000	1.164
+++ hppa-tdep.c	26 May 2004 03:56:38 -0000
@@ -1887,6 +1887,7 @@ static struct hppa_frame_cache *
 hppa_fallback_frame_cache (struct frame_info *next_frame, void **this_cache)
 {
   struct hppa_frame_cache *cache;
+  unsigned int frame_size;
   CORE_ADDR pc, start_pc, end_pc, cur_pc;
 
   cache = FRAME_OBSTACK_ZALLOC (struct hppa_frame_cache);
@@ -1895,6 +1896,7 @@ hppa_fallback_frame_cache (struct frame_
 
   pc = frame_func_unwind (next_frame);
   cur_pc = frame_pc_unwind (next_frame);
+  frame_size = 0;
 
   find_pc_partial_function (pc, NULL, &start_pc, &end_pc);
 
@@ -1914,21 +1916,18 @@ hppa_fallback_frame_cache (struct frame_
 
       insn = read_memory_unsigned_integer (pc, 4);
 
+      frame_size += prologue_inst_adjust_sp (insn);
+
       /* There are limited ways to store the return pointer into the
 	 stack.  */
       if (insn == 0x6bc23fd9) /* stw rp,-0x14(sr0,sp) */
-	{
-	  cache->saved_regs[HPPA_RP_REGNUM].addr = -20;
-	  break;
-	}
+	 cache->saved_regs[HPPA_RP_REGNUM].addr = -20;
       else if (insn == 0x0fc212c1) /* std rp,-0x10(sr0,sp) */
-	{
-	  cache->saved_regs[HPPA_RP_REGNUM].addr = -16;
-	  break;
-	}
+	 cache->saved_regs[HPPA_RP_REGNUM].addr = -16;
     }
 
-  cache->base = frame_unwind_register_unsigned (next_frame, HPPA_SP_REGNUM);
+  cache->base = frame_unwind_register_unsigned (next_frame, HPPA_SP_REGNUM) - frame_size;
+  trad_frame_set_value (cache->saved_regs, HPPA_SP_REGNUM, cache->base);
 
   if (trad_frame_addr_p (cache->saved_regs, HPPA_RP_REGNUM))
     {
@@ -2080,7 +2079,8 @@ hppa_stub_unwind_sniffer (struct frame_i
 {
   CORE_ADDR pc = frame_pc_unwind (next_frame);
 
-  if (IN_SOLIB_CALL_TRAMPOLINE (pc, NULL)
+  if (pc == 0
+      || IN_SOLIB_CALL_TRAMPOLINE (pc, NULL)
       || IN_SOLIB_RETURN_TRAMPOLINE (pc, NULL))
     return &hppa_stub_frame_unwind;
   return NULL;


      reply	other threads:[~2004-05-26  4:02 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2004-05-22  6:34 Randolph Chung
2004-05-25 18:12 ` Andrew Cagney
2004-05-25 18:35   ` Randolph Chung
2004-05-25 21:16     ` Andrew Cagney
2004-05-26  4:02       ` Randolph Chung [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20040526040215.GS7207@tausq.org \
    --to=randolph@tausq.org \
    --cc=cagney@gnu.org \
    --cc=gdb-patches@sources.redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
  Be sure your reply has a Subject: header at the top and a blank line
  before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used by this inbox.