From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (qmail 4154 invoked by alias); 16 Jun 2012 11:27:30 -0000 Received: (qmail 4146 invoked by uid 22791); 16 Jun 2012 11:27:29 -0000 X-SWARE-Spam-Status: No, hits=-2.9 required=5.0 tests=AWL,BAYES_00,KHOP_THREADED,TW_XF,T_RP_MATCHES_RCVD X-Spam-Check-By: sourceware.org Received: from sibelius.xs4all.nl (HELO glazunov.sibelius.xs4all.nl) (83.163.83.176) by sourceware.org (qpsmtpd/0.43rc1) with ESMTP; Sat, 16 Jun 2012 11:27:16 +0000 Received: from glazunov.sibelius.xs4all.nl (kettenis@localhost [127.0.0.1]) by glazunov.sibelius.xs4all.nl (8.14.5/8.14.3) with ESMTP id q5GBRC7d004180; Sat, 16 Jun 2012 13:27:12 +0200 (CEST) Received: (from kettenis@localhost) by glazunov.sibelius.xs4all.nl (8.14.5/8.14.3/Submit) id q5GBRASA028790; Sat, 16 Jun 2012 13:27:11 +0200 (CEST) Date: Sat, 16 Jun 2012 11:27:00 -0000 Message-Id: <201206161127.q5GBRASA028790@glazunov.sibelius.xs4all.nl> From: Mark Kettenis To: hjl.tools@gmail.com CC: gdb-patches@sourceware.org In-reply-to: <20120613230825.GA11653@intel.com> (hongjiu.lu@intel.com) Subject: Re: [PATCH 5/5] Add x32 support to amd64_analyze_stack_align References: <20120613230825.GA11653@intel.com> Mailing-List: contact gdb-patches-help@sourceware.org; run by ezmlm Precedence: bulk List-Id: List-Subscribe: List-Archive: List-Post: List-Help: , Sender: gdb-patches-owner@sourceware.org X-SW-Source: 2012-06/txt/msg00541.txt.bz2 > Date: Wed, 13 Jun 2012 16:08:25 -0700 > From: "H.J. Lu" > > Hi, > > This patch adds support to amd64_analyze_stack_align. OK to install? Sorry, no. I think this function is complex enough as it is now. Please create a separate function for x32. That may lead to some code duplication, but that is better than increasing the complexity of this already complex function. > H.J. > --- > * amd64-tdep.c (amd64_analyze_stack_align): Add an argumet > to indicate x32 and add x32 support. > (amd64_analyze_prologue): Update amd64_analyze_stack_align call. 
> > --- > gdb/amd64-tdep.c | 88 +++++++++++++++++++++++++++++++++++++++--------------- > 1 files changed, 64 insertions(+), 24 deletions(-) > > diff --git a/gdb/amd64-tdep.c b/gdb/amd64-tdep.c > index dd2da2f..9bcf845 100644 > --- a/gdb/amd64-tdep.c > +++ b/gdb/amd64-tdep.c > @@ -1714,7 +1714,7 @@ amd64_alloc_frame_cache (void) > > static CORE_ADDR > amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, > - struct amd64_frame_cache *cache) > + struct amd64_frame_cache *cache, int is_x32) > { > /* There are 2 code sequences to re-align stack before the frame > gets set up: > @@ -1725,6 +1725,12 @@ amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, > andq $-XXX, %rsp > pushq -8(%reg) > > + For x32, it can be > + > + [addr32] leal 8(%rsp), %reg > + andl $-XXX, %esp > + [addr32] pushq -8(%reg) > + > 2. Use a callee-saved saved register: > > pushq %reg > @@ -1732,56 +1738,72 @@ amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, > andq $-XXX, %rsp > pushq -8(%reg) > > + For x32, it can be > + > + pushq %reg > + [addr32] leal 16(%rsp), %reg > + andl $-XXX, %esp > + [addr32] pushq -8(%reg) > + > "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes: > > 0x48 0x83 0xe4 0xf0 andq $-16, %rsp > 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp > + > + "andl $-XXX, %esp" can be either 3 bytes or 6 bytes: > + > + 0x83 0xe4 0xf0 andl $-16, %esp > + 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp > */ > > - gdb_byte buf[18]; > + gdb_byte buf[19]; > int reg, r; > int offset, offset_and; > > if (target_read_memory (pc, buf, sizeof buf)) > return pc; > > + /* Skip optional addr32 prefix for x32. */ > + offset = 0; > + if (is_x32 && buf[0] == 0x67) > + offset++; > + > /* Check caller-saved saved register. The first instruction has > - to be "leaq 8(%rsp), %reg". */ > - if ((buf[0] & 0xfb) == 0x48 > - && buf[1] == 0x8d > - && buf[3] == 0x24 > - && buf[4] == 0x8) > + to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". 
*/ > + if (((buf[offset] & 0xfb) == 0x48 > + || (is_x32 && (buf[offset] & 0xfb) == 0x40)) > + && buf[offset + 1] == 0x8d > + && buf[offset + 3] == 0x24 > + && buf[offset + 4] == 0x8) > { > /* MOD must be binary 10 and R/M must be binary 100. */ > - if ((buf[2] & 0xc7) != 0x44) > + if ((buf[offset + 2] & 0xc7) != 0x44) > return pc; > > /* REG has register number. */ > - reg = (buf[2] >> 3) & 7; > + reg = (buf[offset + 2] >> 3) & 7; > > /* Check the REX.R bit. */ > - if (buf[0] == 0x4c) > + if ((buf[offset] & 0x4) != 0) > reg += 8; > > - offset = 5; > + offset += 5; > } > else > { > /* Check callee-saved saved register. The first instruction > has to be "pushq %reg". */ > reg = 0; > - if ((buf[0] & 0xf8) == 0x50) > - offset = 0; > - else if ((buf[0] & 0xf6) == 0x40 > - && (buf[1] & 0xf8) == 0x50) > + if ((buf[offset] & 0xf6) == 0x40 > + && (buf[offset + 1] & 0xf8) == 0x50) > { > /* Check the REX.B bit. */ > - if ((buf[0] & 1) != 0) > + if ((buf[offset] & 1) != 0) > reg = 8; > > - offset = 1; > + offset += 1; > } > - else > + else if ((buf[offset] & 0xf8) != 0x50) > return pc; > > /* Get register. */ > @@ -1789,8 +1811,14 @@ amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, > > offset++; > > - /* The next instruction has to be "leaq 16(%rsp), %reg". */ > - if ((buf[offset] & 0xfb) != 0x48 > + /* Skip optional addr32 prefix for x32. */ > + if (is_x32 && buf[offset] == 0x67) > + offset++; > + > + /* The next instruction has to be "leaq 16(%rsp), %reg" or > + "leal 16(%rsp), %reg". */ > + if (((buf[offset] & 0xfb) != 0x48 > + && (!is_x32 || (buf[offset] & 0xfb) != 0x40)) > || buf[offset + 1] != 0x8d > || buf[offset + 3] != 0x24 > || buf[offset + 4] != 0x10) > @@ -1804,7 +1832,7 @@ amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, > r = (buf[offset + 2] >> 3) & 7; > > /* Check the REX.R bit. */ > - if (buf[offset] == 0x4c) > + if ((buf[offset] & 0x4) != 0) > r += 8; > > /* Registers in pushq and leaq have to be the same. 
*/ > @@ -1819,14 +1847,25 @@ amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, > return pc; > > /* The next instruction has to be "andq $-XXX, %rsp". */ > - if (buf[offset] != 0x48 > - || buf[offset + 2] != 0xe4 > + if (buf[offset] != 0x48) > + { > + if (!is_x32) > + return pc; > + /* X32 may have "andl $-XXX, %esp". */ > + offset--; > + } > + > + if (buf[offset + 2] != 0xe4 > || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83)) > return pc; > > offset_and = offset; > offset += buf[offset + 1] == 0x81 ? 7 : 4; > > + /* Skip optional addr32 prefix for x32. */ > + if (is_x32 && buf[offset] == 0x67) > + offset++; > + > /* The next instruction has to be "pushq -8(%reg)". */ > r = 0; > if (buf[offset] == 0xff) > @@ -1898,7 +1937,8 @@ amd64_analyze_prologue (struct gdbarch *gdbarch, > if (current_pc <= pc) > return current_pc; > > - pc = amd64_analyze_stack_align (pc, current_pc, cache); > + pc = amd64_analyze_stack_align (pc, current_pc, cache, > + gdbarch_ptr_bit (gdbarch) == 32); > > op = read_memory_unsigned_integer (pc, 1, byte_order); > > -- > 1.7.6.5 > >