From: Yao Qi <yao@codesourcery.com>
To: gdb-patches@sourceware.org
Subject: Re: [patch 2/3] Displaced stepping for 16-bit Thumb instructions
Date: Sat, 25 Dec 2010 17:09:00 -0000 [thread overview]
Message-ID: <4D15FCD1.7070706@codesourcery.com> (raw)
In-Reply-To: <4D15F9B8.5070705@codesourcery.com>
[-- Attachment #1: Type: text/plain, Size: 270 bytes --]
Current implementation of displaced stepping in ARM assumes instruction
size is fixed 32-bit. Patch 2 is to rewrite some infrastructure code to
be ready to handle non-32-bit instructions. This patch doesn't change
any GDB functionality either.
--
Yao (齐尧)
[-- Attachment #2: arm_disp_step_refactor_for_thumb_p2.patch --]
[-- Type: text/x-patch, Size: 15201 bytes --]
2010-12-25 Yao Qi <yao@codesourcery.com>
Handle both 32-bit and 16-bit insns for displaced stepping.
* gdb/arm-tdep.h (struct displaced_step_closure): Add new field
modinsns.
Remove field modinsn.
(RECORD_MOD_32BIT_INSN): New macro.
(RECORD_MOD_16BIT_INSN): New macro.
(RECORD_MOD_INSN): New macro.
* gdb/arm-tdep.c (arm_displaced_step_breakpoint_offset): New.
(cleanup_branch): Replace magic number by macros.
(copy_unmodified): Save modified insns by RECORD_MOD_32BIT_INSN.
(copy_preload): Likewise.
(copy_preload_reg): Likewise.
(copy_copro_load_store): Likewise.
(copy_b_bl_blx): Likewise.
(copy_bx_blx_reg): Likewise.
(copy_alu_imm): Likewise.
(copy_alu_reg): Likewise.
(copy_alu_shifted_reg): Likewise.
(copy_extra_ld_st): Likewise.
(copy_ldr_str_ldrb_strb): Likewise.
(copy_block_xfer): Likewise.
(copy_svc): Likewise.
(copy_undef): Likewise.
(copy_unpred): Likewise.
(decode_svc_copro): Likewise.
(arm_displaced_init_closure): Handle both 32bit and 16bit insns.
(arm_displaced_step_fixup): Likewise.
* gdb/arm-linux-tdep.c (arm_linux_copy_svc): Save modified insns by
RECORD_MOD_32BIT_INSN.
(arm_catch_kernel_helper_return): Likewise.
diff --git a/gdb/arm-linux-tdep.c b/gdb/arm-linux-tdep.c
index 06f386a..b20b44f 100644
--- a/gdb/arm-linux-tdep.c
+++ b/gdb/arm-linux-tdep.c
@@ -827,7 +827,7 @@ arm_linux_copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
Cleanup: if pc lands in scratch space, pc <- insn_addr + 4
else leave pc alone. */
- dsc->modinsn[0] = insn;
+ RECORD_MOD_32BIT_INSN (0, insn);
dsc->cleanup = &arm_linux_cleanup_svc;
/* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
@@ -885,7 +885,7 @@ arm_catch_kernel_helper_return (struct gdbarch *gdbarch, CORE_ADDR from,
CANNOT_WRITE_PC);
write_memory_unsigned_integer (to + 8, 4, byte_order, from);
- dsc->modinsn[0] = 0xe59ef004; /* ldr pc, [lr, #4]. */
+ RECORD_MOD_32BIT_INSN (0, 0xe59ef004); /* ldr pc, [lr, #4]. */
}
/* Linux-specific displaced step instruction copying function. Detects when
diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 64aa500..0e97674 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -357,6 +357,20 @@ arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
static CORE_ADDR arm_get_next_pc_raw (struct frame_info *frame,
CORE_ADDR pc, int insert_bkpt);
+/* Return the offset of breakpoint instruction that should be put in copy
+ area. */
+static int
+arm_displaced_step_breakpoint_offset (struct displaced_step_closure * dsc)
+{
+ int i, size;
+ for (i = 0, size = 0; i < dsc->numinsns; i++)
+ {
+ gdb_assert (dsc->modinsns[i].size == 2 ||dsc->modinsns[i].size == 4);
+ size += dsc->modinsns[i].size;
+ }
+ return size;
+}
+
/* Determine if the program counter specified in MEMADDR is in a Thumb
function. This function should be called for addresses unrelated to
any executing frame; otherwise, prefer arm_frame_is_thumb. */
@@ -4328,7 +4342,7 @@ copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
"opcode/class '%s' unmodified\n", (unsigned long) insn,
iname);
- dsc->modinsn[0] = insn;
+ RECORD_MOD_32BIT_INSN (0, insn);
return 0;
}
@@ -4371,7 +4385,7 @@ copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
dsc->u.preload.immed = 1;
- dsc->modinsn[0] = insn & 0xfff0ffff;
+ RECORD_MOD_32BIT_INSN (0, (insn & 0xfff0ffff));
dsc->cleanup = &cleanup_preload;
@@ -4411,7 +4425,7 @@ copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
dsc->u.preload.immed = 0;
- dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff0fff0) | 0x1));
dsc->cleanup = &cleanup_preload;
@@ -4464,7 +4478,7 @@ copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
dsc->u.ldst.writeback = bit (insn, 25);
dsc->u.ldst.rn = rn;
- dsc->modinsn[0] = insn & 0xfff0ffff;
+ RECORD_MOD_32BIT_INSN (0, (insn & 0xfff0ffff));
dsc->cleanup = &cleanup_copro_load_store;
@@ -4489,11 +4503,11 @@ cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
if (dsc->u.branch.link)
{
- ULONGEST pc = displaced_read_reg (regs, from, 15);
- displaced_write_reg (regs, dsc, 14, pc - 4, CANNOT_WRITE_PC);
+ ULONGEST pc = displaced_read_reg (regs, from, ARM_PC_REGNUM);
+ displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
}
- displaced_write_reg (regs, dsc, 15, dsc->u.branch.dest, write_pc);
+ displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
}
/* Copy B/BL/BLX instructions with immediate destinations. */
@@ -4536,7 +4550,7 @@ copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
dsc->u.branch.exchange = exchange;
dsc->u.branch.dest = from + 8 + offset;
- dsc->modinsn[0] = ARM_NOP;
+ RECORD_MOD_32BIT_INSN (0, ARM_NOP);
dsc->cleanup = &cleanup_branch;
@@ -4574,7 +4588,7 @@ copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
dsc->u.branch.link = link;
dsc->u.branch.exchange = 1;
- dsc->modinsn[0] = ARM_NOP;
+ RECORD_MOD_32BIT_INSN (0, ARM_NOP);
dsc->cleanup = &cleanup_branch;
@@ -4633,9 +4647,9 @@ copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
dsc->rd = rd;
if (is_mov)
- dsc->modinsn[0] = insn & 0xfff00fff;
+ RECORD_MOD_32BIT_INSN (0, (insn & 0xfff00fff));
else
- dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00fff) | 0x10000));
dsc->cleanup = &cleanup_alu_imm;
@@ -4702,9 +4716,9 @@ copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
dsc->rd = rd;
if (is_mov)
- dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00ff0) | 0x2));
else
- dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00ff0) | 0x10002));
dsc->cleanup = &cleanup_alu_reg;
@@ -4776,9 +4790,9 @@ copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
dsc->rd = rd;
if (is_mov)
- dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff000f0) | 0x302));
else
- dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff000f0) | 0x10302));
dsc->cleanup = &cleanup_alu_shifted_reg;
@@ -4902,12 +4916,12 @@ copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
/* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
->
{ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
- dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00fff) | 0x20000));
else
/* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
->
{ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
- dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00ff0) | 0x20003));
dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
@@ -4982,32 +4996,32 @@ copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
->
{ldr,str}[b]<cond> r0, [r2, #imm]. */
- dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00fff) | 0x20000));
else
/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
->
{ldr,str}[b]<cond> r0, [r2, r3]. */
- dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
+ RECORD_MOD_32BIT_INSN (0, ((insn & 0xfff00ff0) | 0x20003));
}
else
{
/* We need to use r4 as scratch. Make sure it's restored afterwards. */
dsc->u.ldst.restore_r4 = 1;
- dsc->modinsn[0] = 0xe58ff014; /* str pc, [pc, #20]. */
- dsc->modinsn[1] = 0xe59f4010; /* ldr r4, [pc, #16]. */
- dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
- dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
- dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
+ RECORD_MOD_32BIT_INSN (0, 0xe58ff014); /* str pc, [pc, #20]. */
+ RECORD_MOD_32BIT_INSN (1, 0xe59f4010); /* ldr r4, [pc, #16]. */
+ RECORD_MOD_32BIT_INSN (2, 0xe044400f); /* sub r4, r4, pc. */
+ RECORD_MOD_32BIT_INSN (3, 0xe2844008); /* add r4, r4, #8. */
+ RECORD_MOD_32BIT_INSN (4, 0xe0800004); /* add r0, r0, r4. */
/* As above. */
if (immed)
- dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
+ RECORD_MOD_32BIT_INSN (5, ((insn & 0xfff00fff) | 0x20000));
else
- dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
+ RECORD_MOD_32BIT_INSN (5, ((insn & 0xfff00ff0) | 0x20003));
- dsc->modinsn[6] = 0x0; /* breakpoint location. */
- dsc->modinsn[7] = 0x0; /* scratch space. */
+ RECORD_MOD_32BIT_INSN (6, 0x00); /* breakpoint location. */
+ RECORD_MOD_32BIT_INSN (7, 0x00); /* scratch space. */
dsc->numinsns = 6;
}
@@ -5278,7 +5292,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
instruction (which might not behave perfectly in all cases, but
these instructions should be rare enough for that not to matter
too much). */
- dsc->modinsn[0] = ARM_NOP;
+ RECORD_MOD_32BIT_INSN (0, ARM_NOP);
dsc->cleanup = &cleanup_block_load_all;
}
@@ -5322,7 +5336,8 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
"list %.4x\n"), rn, writeback ? "!" : "",
(int) insn & 0xffff, new_regmask);
- dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
+ RECORD_MOD_32BIT_INSN (0,
+ ((insn & ~0xffff) | (new_regmask & 0xffff)));
dsc->cleanup = &cleanup_block_load_pc;
}
@@ -5335,7 +5350,7 @@ copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
Doing things this way has the advantage that we can auto-detect
the offset of the PC write (which is architecture-dependent) in
the cleanup routine. */
- dsc->modinsn[0] = insn;
+ RECORD_MOD_32BIT_INSN (0, insn);
dsc->cleanup = &cleanup_block_store_pc;
}
@@ -5378,7 +5393,7 @@ copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
Insn: unmodified svc.
Cleanup: pc <- insn_addr + 4. */
- dsc->modinsn[0] = insn;
+ RECORD_MOD_32BIT_INSN (0, insn);
dsc->cleanup = &cleanup_svc;
/* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
@@ -5398,7 +5413,7 @@ copy_undef (struct gdbarch *gdbarch, uint32_t insn,
fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn %.8lx\n",
(unsigned long) insn);
- dsc->modinsn[0] = insn;
+ RECORD_MOD_32BIT_INSN (0, insn);
return 0;
}
@@ -5413,7 +5428,7 @@ copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
"%.8lx\n", (unsigned long) insn);
- dsc->modinsn[0] = insn;
+ RECORD_MOD_32BIT_INSN (0, insn);
return 0;
}
@@ -5837,6 +5852,7 @@ decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
else
return copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
}
+
static void
thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
CORE_ADDR to, struct regcache *regs,
@@ -5901,6 +5917,10 @@ arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
_("arm_process_displaced_insn: Instruction decode error"));
}
+static const unsigned char * arm_breakpoint_from_pc (struct gdbarch *gdbarch,
+ CORE_ADDR *pcptr,
+ int *lenptr);
+
/* Actually set up the scratch space for a displaced instruction. */
void
@@ -5908,23 +5928,44 @@ arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
CORE_ADDR to, struct displaced_step_closure *dsc)
{
struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
- unsigned int i;
+ unsigned int i, len, offset;
enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
+ offset = 0;
/* Poke modified instruction(s). */
for (i = 0; i < dsc->numinsns; i++)
{
+ unsigned long insn;
+ if (dsc->modinsns[i].size == 4)
+ insn = dsc->modinsns[i].insn.a;
+ else if (dsc->modinsns[i].size == 2)
+ insn = dsc->modinsns[i].insn.t;
+ else
+ gdb_assert (0);
+
if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog, "displaced: writing insn %.8lx at "
- "%.8lx\n", (unsigned long) dsc->modinsn[i],
- (unsigned long) to + i * 4);
- write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
- dsc->modinsn[i]);
+ {
+ fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
+ if (dsc->modinsns[i].size == 4)
+ fprintf_unfiltered (gdb_stdlog, "%.8lx",
+ dsc->modinsns[i].insn.a);
+ else if (dsc->modinsns[i].size == 2)
+ fprintf_unfiltered (gdb_stdlog, "%.4x",
+ dsc->modinsns[i].insn.t);
+
+ fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
+ (unsigned long) to + offset);
+ }
+ write_memory_unsigned_integer (to + offset, dsc->modinsns[i].size,
+ byte_order_for_code,
+ insn);
+ offset += dsc->modinsns[i].size;
}
/* Put breakpoint afterwards. */
- write_memory (to + dsc->numinsns * 4, tdep->arm_breakpoint,
- tdep->arm_breakpoint_size);
+ write_memory (to + arm_displaced_step_breakpoint_offset (dsc),
+ arm_breakpoint_from_pc (gdbarch, &from, &len),
+ len);
if (debug_displaced)
fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
@@ -5960,7 +6001,11 @@ arm_displaced_step_fixup (struct gdbarch *gdbarch,
dsc->cleanup (gdbarch, regs, dsc);
if (!dsc->wrote_to_pc)
- regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, dsc->insn_addr + 4);
+ {
+ struct frame_info *fi = get_current_frame ();
+ regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
+ arm_get_next_pc_raw(fi, dsc->insn_addr, 0));
+ }
}
#include "bfd-in2.h"
diff --git a/gdb/arm-tdep.h b/gdb/arm-tdep.h
index cfb85ff..dd8aba8 100644
--- a/gdb/arm-tdep.h
+++ b/gdb/arm-tdep.h
@@ -204,6 +204,22 @@ struct gdbarch_tdep
/* Structures used for displaced stepping. */
+#define RECORD_MOD_INSN(INDEX, MODE, INSN) \
+ dsc->modinsns[INDEX].insn.MODE = INSN;\
+ /* dsc->modinsn[INDEX] = INSN */
+
+#define RECORD_MOD_32BIT_INSN(INDEX, INSN) do \
+{\
+ RECORD_MOD_INSN(INDEX, a, INSN);\
+ dsc->modinsns[INDEX].size = 4;\
+ } while (0)
+
+#define RECORD_MOD_16BIT_INSN(INDEX, INSN) do \
+{\
+ RECORD_MOD_INSN(INDEX, t, INSN);\
+ dsc->modinsns[INDEX].size = 2;\
+ } while (0)
+
/* The maximum number of temporaries available for displaced instructions. */
#define DISPLACED_TEMPS 16
/* The maximum number of modified instructions generated for one single-stepped
@@ -262,7 +278,17 @@ struct displaced_step_closure
struct displaced_step_closure *dsc);
} svc;
} u;
- unsigned long modinsn[DISPLACED_MODIFIED_INSNS];
+
+ struct insn
+ {
+ union
+ {
+ unsigned long a;
+ unsigned short t;
+ }insn;
+ unsigned short size;
+ }modinsns[DISPLACED_MODIFIED_INSNS];
+
int numinsns;
CORE_ADDR insn_addr;
CORE_ADDR scratch_base;
next prev parent reply other threads:[~2010-12-25 14:17 UTC|newest]
Thread overview: 66+ messages / expand[flat|nested] mbox.gz Atom feed top
2010-12-25 14:17 [patch 0/3] " Yao Qi
2010-12-25 14:22 ` [patch 1/3] " Yao Qi
2011-02-17 19:09 ` Ulrich Weigand
2010-12-25 17:09 ` Yao Qi [this message]
2011-02-17 19:46 ` [patch 2/3] " Ulrich Weigand
2011-02-18 6:33 ` Yao Qi
2011-02-18 12:18 ` Ulrich Weigand
2011-02-21 7:41 ` Yao Qi
2011-02-21 20:14 ` Ulrich Weigand
2011-02-25 18:09 ` Yao Qi
2011-02-25 20:17 ` Ulrich Weigand
2011-02-26 14:07 ` Yao Qi
2011-02-28 17:37 ` Ulrich Weigand
2011-03-01 9:01 ` Yao Qi
2011-03-01 16:11 ` Ulrich Weigand
2010-12-25 17:54 ` [patch 3/3] " Yao Qi
2010-12-27 15:15 ` Yao Qi
2011-02-17 20:55 ` Ulrich Weigand
2011-02-18 7:30 ` Yao Qi
2011-02-18 13:25 ` Ulrich Weigand
2011-02-28 2:04 ` Displaced stepping 0003: " Yao Qi
2010-12-29 5:48 ` [patch 0/3] Displaced stepping " Yao Qi
2011-01-13 12:38 ` Yao Qi
2011-02-10 6:48 ` Ping 2 " Yao Qi
2011-02-26 17:50 ` Displaced stepping 0002: refactor and create some copy helpers Yao Qi
2011-02-28 17:53 ` Ulrich Weigand
2011-02-28 2:15 ` Displaced stepping 0004: wip: 32-bit Thumb instructions Yao Qi
2011-03-24 13:49 ` [try 2nd 0/8] Displaced stepping for " Yao Qi
2011-03-24 13:56 ` [try 2nd 1/8] Fix cleanup_branch to take Thumb into account Yao Qi
2011-04-06 20:46 ` Ulrich Weigand
2011-04-07 3:45 ` Yao Qi
2011-03-24 13:58 ` [try 2nd 2/8] Rename copy_* functions to arm_copy_* Yao Qi
2011-04-06 20:51 ` Ulrich Weigand
2011-04-07 8:02 ` Yao Qi
2011-04-19 9:07 ` Yao Qi
2011-04-26 17:09 ` Ulrich Weigand
2011-04-27 10:27 ` Yao Qi
2011-04-27 13:32 ` Ulrich Weigand
2011-04-28 5:05 ` Yao Qi
2011-03-24 14:01 ` [try 2nd 3/8] Refactor copy_svc_os Yao Qi
2011-04-06 20:55 ` Ulrich Weigand
2011-04-07 4:19 ` Yao Qi
2011-03-24 14:05 ` [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns Yao Qi
2011-05-05 13:25 ` Yao Qi
2011-05-17 17:14 ` Ulrich Weigand
2011-05-23 11:32 ` Yao Qi
2011-05-23 11:32 ` Yao Qi
2011-05-27 22:11 ` Ulrich Weigand
2011-07-06 10:55 ` Yao Qi
2011-07-15 19:57 ` Ulrich Weigand
2011-07-18 9:26 ` Yao Qi
2011-03-24 14:05 ` [try 2nd 4/8] Displaced stepping for Thumb 16-bit insn Yao Qi
2011-05-05 13:24 ` Yao Qi
2011-05-10 13:58 ` Ulrich Weigand
2011-05-11 13:06 ` Yao Qi
2011-05-16 17:19 ` Ulrich Weigand
2011-05-17 14:29 ` Yao Qi
2011-05-17 17:20 ` Ulrich Weigand
2011-03-24 14:06 ` [try 2nd 6/8] Rename some functions to arm_* Yao Qi
2011-04-06 20:52 ` Ulrich Weigand
2011-04-07 4:26 ` Yao Qi
2011-03-24 14:11 ` [try 2nd 7/8] Test case Yao Qi
2011-05-05 13:26 ` Yao Qi
2011-05-11 13:15 ` [try 2nd 7/8] Test case: V3 Yao Qi
2011-05-17 17:24 ` Ulrich Weigand
2011-03-24 15:14 ` [try 2nd 8/8] NEWS Yao Qi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4D15FCD1.7070706@codesourcery.com \
--to=yao@codesourcery.com \
--cc=gdb-patches@sourceware.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox