Mirror of the gdb-patches mailing list
 help / color / mirror / Atom feed
From: Nick Clifton <nickc@redhat.com>
To: gdb-patches@sources.redhat.com
Subject: RFA: Allow set/display of e500 accumulator and status registers.
Date: Fri, 22 Nov 2002 10:46:00 -0000	[thread overview]
Message-ID: <m3of8ho07e.fsf@north-pole.nickc.cambridge.redhat.com> (raw)

Hi Guys,

  May I apply the following patch developed by Andrew Cagney to allow
  the accumulator and status registers of the PowerPC e500 core to be
  displayed and edited?  Applying the patch produces no new failures
  in the gdb testsuite for the powerpc-eabispe target.

Cheers
        Nick

2002-11-22  Andrew Cagney  <cagney@redhat.com>

	* rs6000-tdep.c: Include "gdb_assert.h".
	(registers_e500): Add "acc" and "spefscr".
	(PPC_GPRS_PSEUDO_REGS): Remove trailing comma.
	(rs6000_gdbarch_init): Update initialization of ppc_gp0_regnum,
	ppc_gplast_regnum, sp_regnum and fp_regnum.  Check that gp0_regnum
	really is "r0".

Index: gdb/rs6000-tdep.c
===================================================================
RCS file: /cvs/src/src/gdb/rs6000-tdep.c,v
retrieving revision 1.93
diff -c -3 -p -w -r1.93 rs6000-tdep.c
*** gdb/rs6000-tdep.c	18 Nov 2002 22:19:29 -0000	1.93
--- gdb/rs6000-tdep.c	22 Nov 2002 18:42:18 -0000
***************
*** 46,51 ****
--- 46,53 ----
  #include "solib-svr4.h"
  #include "ppc-tdep.h"
  
+ #include "gdb_assert.h"
+ 
  /* If the kernel has to deliver a signal, it pushes a sigcontext
     structure on the stack and then calls the signal handler, passing
     the address of the sigcontext in an argument register. Usually
*************** rs6000_convert_from_func_ptr_addr (CORE_
*** 2268,2274 ****
    /*  0 */ P(r0), P(r1), P(r2), P(r3), P(r4), P(r5), P(r6), P(r7),  \
    /*  8 */ P(r8), P(r9), P(r10),P(r11),P(r12),P(r13),P(r14),P(r15), \
    /* 16 */ P(r16),P(r17),P(r18),P(r19),P(r20),P(r21),P(r22),P(r23), \
!   /* 24 */ P(r24),P(r25),P(r26),P(r27),P(r28),P(r29),P(r30),P(r31), \
  
  /* IBM POWER (pre-PowerPC) architecture, user-level view.  We only cover
     user-level SPR's.  */
--- 2270,2276 ----
    /*  0 */ P(r0), P(r1), P(r2), P(r3), P(r4), P(r5), P(r6), P(r7),  \
    /*  8 */ P(r8), P(r9), P(r10),P(r11),P(r12),P(r13),P(r14),P(r15), \
    /* 16 */ P(r16),P(r17),P(r18),P(r19),P(r20),P(r21),P(r22),P(r23), \
!   /* 24 */ P(r24),P(r25),P(r26),P(r27),P(r28),P(r29),P(r30),P(r31)
  
  /* IBM POWER (pre-PowerPC) architecture, user-level view.  We only cover
     user-level SPR's.  */
*************** static const struct reg registers_e500[]
*** 2447,2452 ****
--- 2449,2457 ----
    PPC_UISA_NOFP_SPRS,
    /* 7...38 */
    PPC_EV_REGS,
+   R(acc), R(spefscr),
+   /* NOTE: Add new registers here, at the end of the raw register
+      list and just before the first pseudo register.  */
    /* 39...70 */
    PPC_GPRS_PSEUDO_REGS
  };
*************** rs6000_gdbarch_init (struct gdbarch_info
*** 2803,2810 ****
  	tdep->ppc_ev31_regnum = -1;
  	break;
        case bfd_mach_ppc_e500:
!         tdep->ppc_gp0_regnum = 39;
!         tdep->ppc_gplast_regnum = 70;
          tdep->ppc_toc_regnum = -1;
          tdep->ppc_ps_regnum = 1;
          tdep->ppc_cr_regnum = 2;
--- 2808,2815 ----
  	tdep->ppc_ev31_regnum = -1;
  	break;
        case bfd_mach_ppc_e500:
!         tdep->ppc_gp0_regnum = 41;
!         tdep->ppc_gplast_regnum = tdep->ppc_gp0_regnum + 32 - 1;
          tdep->ppc_toc_regnum = -1;
          tdep->ppc_ps_regnum = 1;
          tdep->ppc_cr_regnum = 2;
*************** rs6000_gdbarch_init (struct gdbarch_info
*** 2814,2821 ****
  	tdep->ppc_ev0_regnum = 7;
  	tdep->ppc_ev31_regnum = 38;
          set_gdbarch_pc_regnum (gdbarch, 0);
!         set_gdbarch_sp_regnum (gdbarch, 40);
!         set_gdbarch_fp_regnum (gdbarch, 40);
          set_gdbarch_dwarf2_reg_to_regnum (gdbarch, e500_dwarf2_reg_to_regnum);
          set_gdbarch_pseudo_register_read (gdbarch, e500_pseudo_register_read);
          set_gdbarch_pseudo_register_write (gdbarch, e500_pseudo_register_write);
--- 2819,2826 ----
  	tdep->ppc_ev0_regnum = 7;
  	tdep->ppc_ev31_regnum = 38;
          set_gdbarch_pc_regnum (gdbarch, 0);
!         set_gdbarch_sp_regnum (gdbarch, tdep->ppc_gp0_regnum + 1);
!         set_gdbarch_fp_regnum (gdbarch, tdep->ppc_gp0_regnum + 1);
          set_gdbarch_dwarf2_reg_to_regnum (gdbarch, e500_dwarf2_reg_to_regnum);
          set_gdbarch_pseudo_register_read (gdbarch, e500_pseudo_register_read);
          set_gdbarch_pseudo_register_write (gdbarch, e500_pseudo_register_write);
*************** rs6000_gdbarch_init (struct gdbarch_info
*** 2829,2834 ****
--- 2834,2842 ----
  	tdep->ppc_ev31_regnum = -1;
  	break;
        }   
+ 
+   /* Sanity check on registers.  */
+   gdb_assert (strcmp (tdep->regs[tdep->ppc_gp0_regnum].name, "r0") == 0);
  
    /* Set lr_frame_offset.  */
    if (wordsize == 8)


             reply	other threads:[~2002-11-22 18:46 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2002-11-22 10:46 Nick Clifton [this message]
2002-12-18 14:46 ` Andrew Cagney
2003-02-19  0:12   ` Andrew Cagney
2003-02-19 18:31     ` Kevin Buettner
2003-02-19 18:37       ` Andrew Cagney

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=m3of8ho07e.fsf@north-pole.nickc.cambridge.redhat.com \
    --to=nickc@redhat.com \
    --cc=gdb-patches@sources.redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox