* [PATCH 07/15] btrace: add replay position to btrace thread info
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-02 12:03 ` [PATCH 01/15] gdbarch: add instruction predicate methods Markus Metzger
` (13 subsequent siblings)
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
Add a branch trace instruction iterator pointing to the current replay position
to the branch trace thread info struct.
Free the iterator when btrace is cleared.
Start at the replay position for the instruction and function-call histories.
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (replay) <replay>: New.
(btrace_is_replaying): New.
* btrace.c (btrace_clear): Free replay iterator.
(btrace_is_replaying): New.
* record-btrace.c (record_btrace_is_replaying): New.
(record_btrace_info): Print insn number if replaying.
(record_btrace_insn_history): Start at replay position.
(record_btrace_call_history): Start at replay position.
(init_record_btrace_ops): Init to_record_is_replaying.
---
gdb/btrace.c | 10 ++++++
gdb/btrace.h | 6 ++++
gdb/record-btrace.c | 77 +++++++++++++++++++++++++++++++++++++++++++++-----
3 files changed, 85 insertions(+), 8 deletions(-)
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 9478350..bb2e051 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -750,9 +750,11 @@ btrace_clear (struct thread_info *tp)
xfree (btinfo->insn_history);
xfree (btinfo->call_history);
+ xfree (btinfo->replay);
btinfo->insn_history = NULL;
btinfo->call_history = NULL;
+ btinfo->replay = NULL;
}
/* See btrace.h. */
@@ -1158,3 +1160,11 @@ btrace_set_call_history (struct btrace_thread_info *btinfo,
history->begin = begin;
history->end = end;
}
+
+/* See btrace.h. */
+
+int
+btrace_is_replaying (struct thread_info *tp)
+{
+ return tp->btrace.replay != NULL;
+}
diff --git a/gdb/btrace.h b/gdb/btrace.h
index ac7acdb..cb7a8b6 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -160,6 +160,9 @@ struct btrace_thread_info
/* The function call history iterator. */
struct btrace_call_history *call_history;
+
+ /* The current replay position. NULL if not replaying. */
+ struct btrace_insn_iterator *replay;
};
/* Enable branch tracing for a thread. */
@@ -242,4 +245,7 @@ extern void btrace_set_call_history (struct btrace_thread_info *,
struct btrace_function *begin,
struct btrace_function *end);
+/* Determine if branch tracing is currently replaying TP. */
+extern int btrace_is_replaying (struct thread_info *tp);
+
#endif /* BTRACE_H */
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index ebb26ad..56eccab 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -231,6 +231,10 @@ record_btrace_info (void)
printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
"%d (%s).\n"), insts, funcs, tp->num,
target_pid_to_str (tp->ptid));
+
+ if (btrace_is_replaying (tp))
+ printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
+ btrace_insn_number (btinfo->replay));
}
/* Print an unsigned int. */
@@ -296,11 +300,32 @@ record_btrace_insn_history (int size, int flags)
history = btinfo->insn_history;
if (history == NULL)
{
- /* No matter the direction, we start with the tail of the trace. */
- btrace_insn_end (&begin, btinfo);
- end = begin;
+ struct btrace_insn_iterator *replay;
+
+ /* If we're replaying, we start at the replay position. Otherwise, we
+ start at the tail of the trace. */
+ replay = btinfo->replay;
+ if (replay != NULL)
+ begin = *replay;
+ else
+ btrace_insn_end (&begin, btinfo);
- covered = btrace_insn_prev (&begin, context);
+ /* We start from here and expand in the requested direction. Then we
+ expand in the other direction, as well, to fill up any remaining
+ context. */
+ end = begin;
+ if (size < 0)
+ {
+ /* We want the current position covered, as well. */
+ covered = btrace_insn_next (&end, 1);
+ covered += btrace_insn_prev (&begin, context - covered);
+ covered += btrace_insn_next (&end, context - covered);
+ }
+ else
+ {
+ covered = btrace_insn_next (&end, context);
+ covered += btrace_insn_prev (&begin, context - covered);
+ }
}
else
{
@@ -608,11 +633,32 @@ record_btrace_call_history (int size, int flags)
history = btinfo->call_history;
if (history == NULL)
{
- /* No matter the direction, we start with the tail of the trace. */
- begin = btinfo->end;
- end = begin;
+ struct btrace_insn_iterator *replay;
+
+ /* If we're replaying, we start at the replay position. Otherwise, we
+ start at the tail of the trace. */
+ replay = btinfo->replay;
+ if (replay != NULL)
+ begin = replay->function;
+ else
+ begin = btinfo->end;
- covered = btrace_func_prev (&begin, context);
+ /* We start from here and expand in the requested direction. Then we
+ expand in the other direction, as well, to fill up any remaining
+ context. */
+ end = begin;
+ if (size < 0)
+ {
+ /* We want the current position covered, as well. */
+ covered = btrace_func_next (&end, 1);
+ covered += btrace_func_prev (&begin, context - covered);
+ covered += btrace_func_next (&end, context - covered);
+ }
+ else
+ {
+ covered = btrace_func_next (&end, context);
+ covered += btrace_func_prev (&begin, context - covered);
+ }
}
else
{
@@ -729,6 +775,20 @@ record_btrace_call_history_from (ULONGEST from, int size, int flags)
record_btrace_call_history_range (begin, end, flags);
}
+/* The to_record_is_replaying method of target record-btrace. */
+
+static int
+record_btrace_is_replaying (void)
+{
+ struct thread_info *tp;
+
+ ALL_THREADS (tp)
+ if (btrace_is_replaying (tp))
+ return 1;
+
+ return 0;
+}
+
/* Initialize the record-btrace target ops. */
static void
@@ -755,6 +815,7 @@ init_record_btrace_ops (void)
ops->to_call_history = record_btrace_call_history;
ops->to_call_history_from = record_btrace_call_history_from;
ops->to_call_history_range = record_btrace_call_history_range;
+ ops->to_record_is_replaying = record_btrace_is_replaying;
ops->to_stratum = record_stratum;
ops->to_magic = OPS_MAGIC;
}
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH 01/15] gdbarch: add instruction predicate methods
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
2013-05-02 12:03 ` [PATCH 07/15] btrace: add replay position to btrace thread info Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-13 15:23 ` Jan Kratochvil
2013-05-02 12:03 ` [PATCH 03/15] record-btrace: fix insn range in function call history Markus Metzger
` (12 subsequent siblings)
14 siblings, 1 reply; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
Add new methods to gdbarch for analyzing the instruction at a given address.
Implement those methods for i386 and amd64 architectures.
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* amd64-tdep.c (amd64_classify_insn_at, amd64_insn_call_p,
amd64_insn_ret_p, amd64_insn_jump_p, amd64_jmp_p): New.
(amd64_init_abi): Add insn_call_p, insn_ret_p, and insn_jump_p
to gdbarch.
* i386-tdep.c (i386_insn_call_p, i386_insn_ret_p,
i386_insn_jump_p, i386_jmp_p): New.
(i386_gdbarch_init): Add insn_call_p, insn_ret_p, and
insn_jump_p to gdbarch.
* gdbarch.sh (insn_call_p, insn_ret_p, insn_jump_p): New.
* gdbarch.h: Regenerated.
* gdbarch.c: Regenerated.
---
gdb/amd64-tdep.c | 66 ++++++++++++++++++++++++++++++++++++
gdb/gdbarch.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
gdb/gdbarch.h | 24 +++++++++++++
gdb/gdbarch.sh | 9 +++++
gdb/i386-tdep.c | 57 +++++++++++++++++++++++++++++++
5 files changed, 255 insertions(+), 0 deletions(-)
diff --git a/gdb/amd64-tdep.c b/gdb/amd64-tdep.c
index 3ab74f0..30013b3 100644
--- a/gdb/amd64-tdep.c
+++ b/gdb/amd64-tdep.c
@@ -1365,6 +1365,22 @@ amd64_absolute_jmp_p (const struct amd64_insn *details)
}
static int
+amd64_jmp_p (const struct amd64_insn *details)
+{
+ const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
+
+ /* jump short, relative. */
+ if (insn[0] == 0xeb)
+ return 1;
+
+ /* jump near, relative. */
+ if (insn[0] == 0xe9)
+ return 1;
+
+ return amd64_absolute_jmp_p (details);
+}
+
+static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
@@ -1435,6 +1451,53 @@ amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
return 0;
}
+/* Classify the instruction at ADDR using PRED. */
+
+static int
+amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
+ int (*pred) (const struct amd64_insn *))
+{
+ struct amd64_insn details;
+ gdb_byte *buf;
+ int len, classification;
+
+ len = gdbarch_max_insn_length (gdbarch);
+ buf = xzalloc (len);
+
+ read_memory (addr, buf, len);
+ amd64_get_insn_details (buf, &details);
+
+ classification = pred (&details);
+
+ xfree (buf);
+
+ return classification;
+}
+
+/* The gdbarch insn_call_p method. */
+
+static int
+amd64_insn_call_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
+}
+
+/* The gdbarch insn_ret_p method. */
+
+static int
+amd64_insn_ret_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
+}
+
+/* The gdbarch insn_jump_p method. */
+
+static int
+amd64_insn_jump_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
+}
+
/* Fix up the state of registers and memory after having single-stepped
a displaced instruction. */
@@ -2968,6 +3031,9 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
i386_stap_is_single_operand);
set_gdbarch_stap_parse_special_token (gdbarch,
i386_stap_parse_special_token);
+ set_gdbarch_insn_call_p (gdbarch, amd64_insn_call_p);
+ set_gdbarch_insn_ret_p (gdbarch, amd64_insn_ret_p);
+ set_gdbarch_insn_jump_p (gdbarch, amd64_insn_jump_p);
}
\f
diff --git a/gdb/gdbarch.c b/gdb/gdbarch.c
index 129268f..cf7b82e 100644
--- a/gdb/gdbarch.c
+++ b/gdb/gdbarch.c
@@ -286,6 +286,9 @@ struct gdbarch
gdbarch_core_info_proc_ftype *core_info_proc;
gdbarch_iterate_over_objfiles_in_search_order_ftype *iterate_over_objfiles_in_search_order;
struct ravenscar_arch_ops * ravenscar_ops;
+ gdbarch_insn_call_p_ftype *insn_call_p;
+ gdbarch_insn_ret_p_ftype *insn_ret_p;
+ gdbarch_insn_jump_p_ftype *insn_jump_p;
};
@@ -457,6 +460,9 @@ struct gdbarch startup_gdbarch =
0, /* core_info_proc */
default_iterate_over_objfiles_in_search_order, /* iterate_over_objfiles_in_search_order */
NULL, /* ravenscar_ops */
+ 0, /* insn_call_p */
+ 0, /* insn_ret_p */
+ 0, /* insn_jump_p */
/* startup_gdbarch() */
};
@@ -760,6 +766,9 @@ verify_gdbarch (struct gdbarch *gdbarch)
/* Skip verify of core_info_proc, has predicate. */
/* Skip verify of iterate_over_objfiles_in_search_order, invalid_p == 0 */
/* Skip verify of ravenscar_ops, invalid_p == 0 */
+ /* Skip verify of insn_call_p, has predicate. */
+ /* Skip verify of insn_ret_p, has predicate. */
+ /* Skip verify of insn_jump_p, has predicate. */
buf = ui_file_xstrdup (log, &length);
make_cleanup (xfree, buf);
if (length > 0)
@@ -1081,6 +1090,24 @@ gdbarch_dump (struct gdbarch *gdbarch, struct ui_file *file)
"gdbarch_dump: inner_than = <%s>\n",
host_address_to_string (gdbarch->inner_than));
fprintf_unfiltered (file,
+ "gdbarch_dump: gdbarch_insn_call_p_p() = %d\n",
+ gdbarch_insn_call_p_p (gdbarch));
+ fprintf_unfiltered (file,
+ "gdbarch_dump: insn_call_p = <%s>\n",
+ host_address_to_string (gdbarch->insn_call_p));
+ fprintf_unfiltered (file,
+ "gdbarch_dump: gdbarch_insn_jump_p_p() = %d\n",
+ gdbarch_insn_jump_p_p (gdbarch));
+ fprintf_unfiltered (file,
+ "gdbarch_dump: insn_jump_p = <%s>\n",
+ host_address_to_string (gdbarch->insn_jump_p));
+ fprintf_unfiltered (file,
+ "gdbarch_dump: gdbarch_insn_ret_p_p() = %d\n",
+ gdbarch_insn_ret_p_p (gdbarch));
+ fprintf_unfiltered (file,
+ "gdbarch_dump: insn_ret_p = <%s>\n",
+ host_address_to_string (gdbarch->insn_ret_p));
+ fprintf_unfiltered (file,
"gdbarch_dump: int_bit = %s\n",
plongest (gdbarch->int_bit));
fprintf_unfiltered (file,
@@ -4356,6 +4383,78 @@ set_gdbarch_ravenscar_ops (struct gdbarch *gdbarch,
gdbarch->ravenscar_ops = ravenscar_ops;
}
+int
+gdbarch_insn_call_p_p (struct gdbarch *gdbarch)
+{
+ gdb_assert (gdbarch != NULL);
+ return gdbarch->insn_call_p != NULL;
+}
+
+int
+gdbarch_insn_call_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ gdb_assert (gdbarch != NULL);
+ gdb_assert (gdbarch->insn_call_p != NULL);
+ if (gdbarch_debug >= 2)
+ fprintf_unfiltered (gdb_stdlog, "gdbarch_insn_call_p called\n");
+ return gdbarch->insn_call_p (gdbarch, addr);
+}
+
+void
+set_gdbarch_insn_call_p (struct gdbarch *gdbarch,
+ gdbarch_insn_call_p_ftype insn_call_p)
+{
+ gdbarch->insn_call_p = insn_call_p;
+}
+
+int
+gdbarch_insn_ret_p_p (struct gdbarch *gdbarch)
+{
+ gdb_assert (gdbarch != NULL);
+ return gdbarch->insn_ret_p != NULL;
+}
+
+int
+gdbarch_insn_ret_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ gdb_assert (gdbarch != NULL);
+ gdb_assert (gdbarch->insn_ret_p != NULL);
+ if (gdbarch_debug >= 2)
+ fprintf_unfiltered (gdb_stdlog, "gdbarch_insn_ret_p called\n");
+ return gdbarch->insn_ret_p (gdbarch, addr);
+}
+
+void
+set_gdbarch_insn_ret_p (struct gdbarch *gdbarch,
+ gdbarch_insn_ret_p_ftype insn_ret_p)
+{
+ gdbarch->insn_ret_p = insn_ret_p;
+}
+
+int
+gdbarch_insn_jump_p_p (struct gdbarch *gdbarch)
+{
+ gdb_assert (gdbarch != NULL);
+ return gdbarch->insn_jump_p != NULL;
+}
+
+int
+gdbarch_insn_jump_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ gdb_assert (gdbarch != NULL);
+ gdb_assert (gdbarch->insn_jump_p != NULL);
+ if (gdbarch_debug >= 2)
+ fprintf_unfiltered (gdb_stdlog, "gdbarch_insn_jump_p called\n");
+ return gdbarch->insn_jump_p (gdbarch, addr);
+}
+
+void
+set_gdbarch_insn_jump_p (struct gdbarch *gdbarch,
+ gdbarch_insn_jump_p_ftype insn_jump_p)
+{
+ gdbarch->insn_jump_p = insn_jump_p;
+}
+
/* Keep a registry of per-architecture data-pointers required by GDB
modules. */
diff --git a/gdb/gdbarch.h b/gdb/gdbarch.h
index 464c4b6..6540d37 100644
--- a/gdb/gdbarch.h
+++ b/gdb/gdbarch.h
@@ -1239,6 +1239,30 @@ extern void set_gdbarch_iterate_over_objfiles_in_search_order (struct gdbarch *g
extern struct ravenscar_arch_ops * gdbarch_ravenscar_ops (struct gdbarch *gdbarch);
extern void set_gdbarch_ravenscar_ops (struct gdbarch *gdbarch, struct ravenscar_arch_ops * ravenscar_ops);
+/* Return non-zero if the instruction at ADDR is a call; zero otherwise. */
+
+extern int gdbarch_insn_call_p_p (struct gdbarch *gdbarch);
+
+typedef int (gdbarch_insn_call_p_ftype) (struct gdbarch *gdbarch, CORE_ADDR addr);
+extern int gdbarch_insn_call_p (struct gdbarch *gdbarch, CORE_ADDR addr);
+extern void set_gdbarch_insn_call_p (struct gdbarch *gdbarch, gdbarch_insn_call_p_ftype *insn_call_p);
+
+/* Return non-zero if the instruction at ADDR is a return; zero otherwise. */
+
+extern int gdbarch_insn_ret_p_p (struct gdbarch *gdbarch);
+
+typedef int (gdbarch_insn_ret_p_ftype) (struct gdbarch *gdbarch, CORE_ADDR addr);
+extern int gdbarch_insn_ret_p (struct gdbarch *gdbarch, CORE_ADDR addr);
+extern void set_gdbarch_insn_ret_p (struct gdbarch *gdbarch, gdbarch_insn_ret_p_ftype *insn_ret_p);
+
+/* Return non-zero if the instruction at ADDR is a jump; zero otherwise. */
+
+extern int gdbarch_insn_jump_p_p (struct gdbarch *gdbarch);
+
+typedef int (gdbarch_insn_jump_p_ftype) (struct gdbarch *gdbarch, CORE_ADDR addr);
+extern int gdbarch_insn_jump_p (struct gdbarch *gdbarch, CORE_ADDR addr);
+extern void set_gdbarch_insn_jump_p (struct gdbarch *gdbarch, gdbarch_insn_jump_p_ftype *insn_jump_p);
+
/* Definition for an unknown syscall, used basically in error-cases. */
#define UNKNOWN_SYSCALL (-1)
diff --git a/gdb/gdbarch.sh b/gdb/gdbarch.sh
index 92d4f0f..5464ff5 100755
--- a/gdb/gdbarch.sh
+++ b/gdb/gdbarch.sh
@@ -972,6 +972,15 @@ m:void:iterate_over_objfiles_in_search_order:iterate_over_objfiles_in_search_ord
# Ravenscar arch-dependent ops.
v:struct ravenscar_arch_ops *:ravenscar_ops:::NULL:NULL::0:host_address_to_string (gdbarch->ravenscar_ops)
+
+# Return non-zero if the instruction at ADDR is a call; zero otherwise.
+M:int:insn_call_p:CORE_ADDR addr:addr
+
+# Return non-zero if the instruction at ADDR is a return; zero otherwise.
+M:int:insn_ret_p:CORE_ADDR addr:addr
+
+# Return non-zero if the instruction at ADDR is a jump; zero otherwise.
+M:int:insn_jump_p:CORE_ADDR addr:addr
EOF
}
diff --git a/gdb/i386-tdep.c b/gdb/i386-tdep.c
index 930d6fc..1d011de 100644
--- a/gdb/i386-tdep.c
+++ b/gdb/i386-tdep.c
@@ -473,6 +473,20 @@ i386_absolute_jmp_p (const gdb_byte *insn)
}
static int
+i386_jmp_p (const gdb_byte *insn)
+{
+ /* jump short, relative. */
+ if (insn[0] == 0xeb)
+ return 1;
+
+ /* jump near, relative. */
+ if (insn[0] == 0xe9)
+ return 1;
+
+ return i386_absolute_jmp_p (insn);
+}
+
+static int
i386_absolute_call_p (const gdb_byte *insn)
{
/* call far, absolute. */
@@ -543,6 +557,45 @@ i386_syscall_p (const gdb_byte *insn, int *lengthp)
return 0;
}
+/* The gdbarch insn_call_p method. */
+
+static int
+i386_insn_call_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ gdb_byte buf[I386_MAX_INSN_LEN], *insn;
+
+ read_memory (addr, buf, I386_MAX_INSN_LEN);
+ insn = i386_skip_prefixes (buf, I386_MAX_INSN_LEN);
+
+ return i386_call_p (insn);
+}
+
+/* The gdbarch insn_ret_p method. */
+
+static int
+i386_insn_ret_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ gdb_byte buf[I386_MAX_INSN_LEN], *insn;
+
+ read_memory (addr, buf, I386_MAX_INSN_LEN);
+ insn = i386_skip_prefixes (buf, I386_MAX_INSN_LEN);
+
+ return i386_ret_p (insn);
+}
+
+/* The gdbarch insn_jump_p method. */
+
+static int
+i386_insn_jump_p (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ gdb_byte buf[I386_MAX_INSN_LEN], *insn;
+
+ read_memory (addr, buf, I386_MAX_INSN_LEN);
+ insn = i386_skip_prefixes (buf, I386_MAX_INSN_LEN);
+
+ return i386_jmp_p (insn);
+}
+
/* Some kernels may run one past a syscall insn, so we have to cope.
Otherwise this is just simple_displaced_step_copy_insn. */
@@ -7774,6 +7827,10 @@ i386_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
set_gdbarch_gen_return_address (gdbarch, i386_gen_return_address);
+ set_gdbarch_insn_call_p (gdbarch, i386_insn_call_p);
+ set_gdbarch_insn_ret_p (gdbarch, i386_insn_ret_p);
+ set_gdbarch_insn_jump_p (gdbarch, i386_insn_jump_p);
+
/* Hook in ABI-specific overrides, if they have been registered. */
info.tdep_info = (void *) tdesc_data;
gdbarch_init_osabi (info, gdbarch);
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* Re: [PATCH 01/15] gdbarch: add instruction predicate methods
2013-05-02 12:03 ` [PATCH 01/15] gdbarch: add instruction predicate methods Markus Metzger
@ 2013-05-13 15:23 ` Jan Kratochvil
0 siblings, 0 replies; 24+ messages in thread
From: Jan Kratochvil @ 2013-05-13 15:23 UTC (permalink / raw)
To: Markus Metzger; +Cc: gdb-patches
On Thu, 02 May 2013 14:03:22 +0200, Markus Metzger wrote:
[...]
> diff --git a/gdb/amd64-tdep.c b/gdb/amd64-tdep.c
> index 3ab74f0..30013b3 100644
> --- a/gdb/amd64-tdep.c
> +++ b/gdb/amd64-tdep.c
> @@ -1365,6 +1365,22 @@ amd64_absolute_jmp_p (const struct amd64_insn *details)
> }
>
Function comment/description.
> static int
> +amd64_jmp_p (const struct amd64_insn *details)
> +{
> + const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
> +
> + /* jump short, relative. */
> + if (insn[0] == 0xeb)
> + return 1;
> +
> + /* jump near, relative. */
> + if (insn[0] == 0xe9)
> + return 1;
> +
> + return amd64_absolute_jmp_p (details);
> +}
> +
> +static int
> amd64_absolute_call_p (const struct amd64_insn *details)
> {
> const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
> @@ -1435,6 +1451,53 @@ amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
> return 0;
> }
>
> +/* Classify the instruction at ADDR using PRED. */
+ /* Throw error if it cannot be read. */
> +
> +static int
> +amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
> + int (*pred) (const struct amd64_insn *))
> +{
> + struct amd64_insn details;
> + gdb_byte *buf;
> + int len, classification;
> +
> + len = gdbarch_max_insn_length (gdbarch);
> + buf = xzalloc (len);
xmalloc is sufficient. But as read_memory can throw we need make_cleanup with
xfree. But that seems as too complicated, use just alloca here.
(I see there is already the same bug in amd64_relocate_instruction.)
> +
> + read_memory (addr, buf, len);
gdbarch_max_insn_length may be longer than the real instruction and readable
memory but the same bug is in amd64_relocate_instruction so it can be fixed in
the future together, OK with it as is.
> + amd64_get_insn_details (buf, &details);
> +
> + classification = pred (&details);
> +
> + xfree (buf);
> +
> + return classification;
> +}
> +
[...]
> --- a/gdb/gdbarch.sh
> +++ b/gdb/gdbarch.sh
> @@ -972,6 +972,15 @@ m:void:iterate_over_objfiles_in_search_order:iterate_over_objfiles_in_search_ord
>
> # Ravenscar arch-dependent ops.
> v:struct ravenscar_arch_ops *:ravenscar_ops:::NULL:NULL::0:host_address_to_string (gdbarch->ravenscar_ops)
> +
> +# Return non-zero if the instruction at ADDR is a call; zero otherwise.
> +M:int:insn_call_p:CORE_ADDR addr:addr
> +
> +# Return non-zero if the instruction at ADDR is a return; zero otherwise.
> +M:int:insn_ret_p:CORE_ADDR addr:addr
> +
> +# Return non-zero if the instruction at ADDR is a jump; zero otherwise.
> +M:int:insn_jump_p:CORE_ADDR addr:addr
These *_p functions with 'M' lead to calls like:
if (!gdbarch_insn_call_p_p (gdbarch))
return NULL;
There are already existing functions like amd64_ret_p but it would be better
not to follow this naming for gdbarch.
gdbarch can have methods like insn_is_call, insn_is_ret etc.
Additionally you can also provide default function here always returning zero
by
M:int:insn_is_call:CORE_ADDR addr:addr::default_insn_is_call
to simplify
if (gdbarch_insn_ret_p_p (gdbarch) && gdbarch_insn_ret_p (gdbarch, lpc))
->
if (gdbarch_insn_ret_p (gdbarch, lpc))
but maybe you are already aware of it and chose it as is intentionally.
> EOF
> }
>
> diff --git a/gdb/i386-tdep.c b/gdb/i386-tdep.c
> index 930d6fc..1d011de 100644
> --- a/gdb/i386-tdep.c
> +++ b/gdb/i386-tdep.c
> @@ -473,6 +473,20 @@ i386_absolute_jmp_p (const gdb_byte *insn)
> }
>
Function comment/description.
> static int
> +i386_jmp_p (const gdb_byte *insn)
> +{
> + /* jump short, relative. */
> + if (insn[0] == 0xeb)
> + return 1;
> +
> + /* jump near, relative. */
> + if (insn[0] == 0xe9)
> + return 1;
> +
> + return i386_absolute_jmp_p (insn);
> +}
> +
> +static int
> i386_absolute_call_p (const gdb_byte *insn)
> {
> /* call far, absolute. */
[...]
Thanks,
Jan
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH 03/15] record-btrace: fix insn range in function call history
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
2013-05-02 12:03 ` [PATCH 07/15] btrace: add replay position to btrace thread info Markus Metzger
2013-05-02 12:03 ` [PATCH 01/15] gdbarch: add instruction predicate methods Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-02 12:03 ` [PATCH 12/15] record-btrace: provide xfer_partial target method Markus Metzger
` (11 subsequent siblings)
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
With the "/i" modifier, we print the instruction number range in the
"record function-call-history" command as [begin, end).
It would be more intuitive if we printed the range as [begin, end].
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* record-btrace.c (btrace_call_history_insn_range): Print
insn range as [begin, end].
---
gdb/record-btrace.c | 8 ++++++--
1 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index e2506a8..d1c9293 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -418,10 +418,14 @@ static void
btrace_call_history_insn_range (struct ui_out *uiout,
const struct btrace_function *bfun)
{
- unsigned int begin, end;
+ unsigned int begin, end, size;
+
+ size = VEC_length (btrace_insn_s, bfun->insn);
+ if (size == 0)
+ return;
begin = bfun->insn_offset;
- end = begin + VEC_length (btrace_insn_s, bfun->insn);
+ end = begin + size - 1;
ui_out_field_uint (uiout, "insn begin", begin);
ui_out_text (uiout, "-");
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH 12/15] record-btrace: provide xfer_partial target method
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (2 preceding siblings ...)
2013-05-02 12:03 ` [PATCH 03/15] record-btrace: fix insn range in function call history Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-02 12:03 ` [PATCH 08/15] target: add ops parameter to to_prepare_to_store method Markus Metzger
` (10 subsequent siblings)
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
Provide the xfer_partial target method for the btrace record target.
Only allow memory accesses to readonly memory while we're replaying.
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* record-btrace.c (record_btrace_xfer_partial): New.
(init_record_btrace_ops): Initialize xfer_partial.
---
gdb/record-btrace.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 59 insertions(+), 0 deletions(-)
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 20c61b7..cd17c77 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -791,6 +791,64 @@ record_btrace_is_replaying (void)
return 0;
}
+/* The to_xfer_partial method of target record-btrace. */
+
+static LONGEST
+record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
+ const char *annex, gdb_byte *readbuf,
+ const gdb_byte *writebuf, ULONGEST offset,
+ LONGEST len)
+{
+ struct target_ops *t;
+
+ /* Normalize the request so len is positive. */
+ if (len < 0)
+ {
+ offset += len;
+ len = - len;
+ }
+
+ /* Filter out requests that don't make sense during replay. */
+ if (record_btrace_is_replaying ())
+ {
+ switch (object)
+ {
+ case TARGET_OBJECT_MEMORY:
+ case TARGET_OBJECT_RAW_MEMORY:
+ case TARGET_OBJECT_STACK_MEMORY:
+ {
+ /* We allow reading readonly memory. */
+ struct target_section *section;
+
+ section = target_section_by_addr (ops, offset);
+ if (section != NULL)
+ {
+ /* Check if the section we found is readonly. */
+ if ((bfd_get_section_flags (section->bfd,
+ section->the_bfd_section)
+ & SEC_READONLY) != 0)
+ {
+ /* Truncate the request to fit into this section. */
+ len = min (len, section->endaddr - offset);
+ break;
+ }
+ }
+
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("This record target does not trace memory."));
+ }
+ }
+ }
+
+ /* Forward the request. */
+ for (t = ops->beneath; t != NULL; t = t->beneath)
+ if (t->to_xfer_partial != NULL)
+ return t->to_xfer_partial (t, object, annex, readbuf, writebuf,
+ offset, len);
+
+ return -1;
+}
+
/* The to_fetch_registers method of target record-btrace. */
static void
@@ -975,6 +1033,7 @@ init_record_btrace_ops (void)
ops->to_call_history_from = record_btrace_call_history_from;
ops->to_call_history_range = record_btrace_call_history_range;
ops->to_record_is_replaying = record_btrace_is_replaying;
+ ops->to_xfer_partial = record_btrace_xfer_partial;
ops->to_fetch_registers = record_btrace_fetch_registers;
ops->to_store_registers = record_btrace_store_registers;
ops->to_prepare_to_store = record_btrace_prepare_to_store;
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH 08/15] target: add ops parameter to to_prepare_to_store method
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (3 preceding siblings ...)
2013-05-02 12:03 ` [PATCH 12/15] record-btrace: provide xfer_partial target method Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-02 12:03 ` [PATCH 09/15] record-btrace: supply register target methods Markus Metzger
` (9 subsequent siblings)
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
To allow forwarding the prepare_to_store request to the target beneath,
add a target_ops * parameter.
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* target.h (target_ops) <to_prepare_to_store>: Add parameter.
(target_prepare_to_store): Remove macro. New function.
* target.c (update_current_target): Do not inherit/default
prepare_to_store.
(target_prepare_to_store): New.
(debug_to_prepare_to_store): Remove.
* remote.c (remote_prepare_to_store): Add parameter.
* remote-mips.c (mips_prepare_to_store): Add parameter.
* remote-m32r-sdi.c (m32r_prepare_to_store): Add parameter.
* ravenscar-thread.c (ravenscar_prepare_to_store): Add
parameter.
* monitor.c (monitor_prepare_to_store): Add parameter.
* inf-child.c (inf_child_prepare_to_store): Add parameter.
---
gdb/inf-child.c | 2 +-
gdb/monitor.c | 2 +-
gdb/ravenscar-thread.c | 7 ++++---
gdb/record-full.c | 3 ++-
gdb/remote-m32r-sdi.c | 2 +-
gdb/remote-mips.c | 5 +++--
gdb/remote.c | 5 +++--
gdb/target.c | 36 +++++++++++++++++++++---------------
gdb/target.h | 5 ++---
9 files changed, 38 insertions(+), 29 deletions(-)
diff --git a/gdb/inf-child.c b/gdb/inf-child.c
index 15d8613..6ec7411 100644
--- a/gdb/inf-child.c
+++ b/gdb/inf-child.c
@@ -103,7 +103,7 @@ inf_child_post_attach (int pid)
program being debugged. */
static void
-inf_child_prepare_to_store (struct regcache *regcache)
+inf_child_prepare_to_store (struct target_ops *ops, struct regcache *regcache)
{
}
diff --git a/gdb/monitor.c b/gdb/monitor.c
index beca4e4..8b1059c 100644
--- a/gdb/monitor.c
+++ b/gdb/monitor.c
@@ -1427,7 +1427,7 @@ monitor_store_registers (struct target_ops *ops,
debugged. */
static void
-monitor_prepare_to_store (struct regcache *regcache)
+monitor_prepare_to_store (struct target_ops *ops, struct regcache *regcache)
{
/* Do nothing, since we can store individual regs. */
}
diff --git a/gdb/ravenscar-thread.c b/gdb/ravenscar-thread.c
index 0a3100d..adcd3a2 100644
--- a/gdb/ravenscar-thread.c
+++ b/gdb/ravenscar-thread.c
@@ -62,7 +62,8 @@ static void ravenscar_fetch_registers (struct target_ops *ops,
struct regcache *regcache, int regnum);
static void ravenscar_store_registers (struct target_ops *ops,
struct regcache *regcache, int regnum);
-static void ravenscar_prepare_to_store (struct regcache *regcache);
+static void ravenscar_prepare_to_store (struct target_ops *ops,
+ struct regcache *regcache);
static void ravenscar_resume (struct target_ops *ops, ptid_t ptid, int step,
enum gdb_signal siggnal);
static void ravenscar_mourn_inferior (struct target_ops *ops);
@@ -303,14 +304,14 @@ ravenscar_store_registers (struct target_ops *ops,
}
static void
-ravenscar_prepare_to_store (struct regcache *regcache)
+ravenscar_prepare_to_store (struct target_ops *ops, struct regcache *regcache)
{
struct target_ops *beneath = find_target_beneath (&ravenscar_ops);
if (!ravenscar_runtime_initialized ()
|| ptid_equal (inferior_ptid, base_magic_null_ptid)
|| ptid_equal (inferior_ptid, ravenscar_running_thread ()))
- beneath->to_prepare_to_store (regcache);
+ beneath->to_prepare_to_store (beneath, regcache);
else
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
diff --git a/gdb/record-full.c b/gdb/record-full.c
index aa3ad85..7eff04d 100644
--- a/gdb/record-full.c
+++ b/gdb/record-full.c
@@ -2148,7 +2148,8 @@ record_full_core_fetch_registers (struct target_ops *ops,
/* "to_prepare_to_store" method for prec over corefile. */
static void
-record_full_core_prepare_to_store (struct regcache *regcache)
+record_full_core_prepare_to_store (struct target_ops *ops,
+ struct regcache *regcache)
{
}
diff --git a/gdb/remote-m32r-sdi.c b/gdb/remote-m32r-sdi.c
index 2f910e6..1955ec1 100644
--- a/gdb/remote-m32r-sdi.c
+++ b/gdb/remote-m32r-sdi.c
@@ -1013,7 +1013,7 @@ m32r_store_register (struct target_ops *ops,
debugged. */
static void
-m32r_prepare_to_store (struct regcache *regcache)
+m32r_prepare_to_store (struct target_ops *target, struct regcache *regcache)
{
/* Do nothing, since we can store individual regs. */
if (remote_debug)
diff --git a/gdb/remote-mips.c b/gdb/remote-mips.c
index 3b65b59..0f8830b 100644
--- a/gdb/remote-mips.c
+++ b/gdb/remote-mips.c
@@ -92,7 +92,8 @@ static int mips_map_regno (struct gdbarch *, int);
static void mips_set_register (int regno, ULONGEST value);
-static void mips_prepare_to_store (struct regcache *regcache);
+static void mips_prepare_to_store (struct target_ops *ops,
+ struct regcache *regcache);
static int mips_fetch_word (CORE_ADDR addr, unsigned int *valp);
@@ -2058,7 +2059,7 @@ mips_fetch_registers (struct target_ops *ops,
registers, so this function doesn't have to do anything. */
static void
-mips_prepare_to_store (struct regcache *regcache)
+mips_prepare_to_store (struct target_ops *ops, struct regcache *regcache)
{
}
diff --git a/gdb/remote.c b/gdb/remote.c
index e1c63ad..95323a5 100644
--- a/gdb/remote.c
+++ b/gdb/remote.c
@@ -101,7 +101,8 @@ void async_remote_interrupt_twice (gdb_client_data);
static void remote_files_info (struct target_ops *ignore);
-static void remote_prepare_to_store (struct regcache *regcache);
+static void remote_prepare_to_store (struct target_ops *ops,
+ struct regcache *regcache);
static void remote_open (char *name, int from_tty);
@@ -6270,7 +6271,7 @@ remote_fetch_registers (struct target_ops *ops,
first. */
static void
-remote_prepare_to_store (struct regcache *regcache)
+remote_prepare_to_store (struct target_ops *ops, struct regcache *regcache)
{
struct remote_arch_state *rsa = get_remote_arch_state ();
int i;
diff --git a/gdb/target.c b/gdb/target.c
index 8f8e46a..69073d6 100644
--- a/gdb/target.c
+++ b/gdb/target.c
@@ -96,8 +96,6 @@ static struct target_ops debug_target;
static void debug_to_open (char *, int);
-static void debug_to_prepare_to_store (struct regcache *);
-
static void debug_to_files_info (struct target_ops *);
static int debug_to_insert_breakpoint (struct gdbarch *,
@@ -626,7 +624,7 @@ update_current_target (void)
/* Do not inherit to_wait. */
/* Do not inherit to_fetch_registers. */
/* Do not inherit to_store_registers. */
- INHERIT (to_prepare_to_store, t);
+ /* Do not inherit to_prepare_to_store. */
INHERIT (deprecated_xfer_memory, t);
INHERIT (to_files_info, t);
INHERIT (to_insert_breakpoint, t);
@@ -759,9 +757,6 @@ update_current_target (void)
de_fault (to_post_attach,
(void (*) (int))
target_ignore);
- de_fault (to_prepare_to_store,
- (void (*) (struct regcache *))
- noprocess);
de_fault (deprecated_xfer_memory,
(int (*) (CORE_ADDR, gdb_byte *, int, int,
struct mem_attrib *, struct target_ops *))
@@ -4056,6 +4051,26 @@ target_store_registers (struct regcache *regcache, int regno)
noprocess ();
}
+/* See target.h. */
+
+void
+target_prepare_to_store (struct regcache *regcache)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ {
+ if (t->to_prepare_to_store != NULL)
+ {
+ t->to_prepare_to_store (t, regcache);
+ if (targetdebug)
+ fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store");
+
+ return;
+ }
+ }
+}
+
int
target_core_of_thread (ptid_t ptid)
{
@@ -4508,14 +4523,6 @@ target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
tcomplain ();
}
-static void
-debug_to_prepare_to_store (struct regcache *regcache)
-{
- debug_target.to_prepare_to_store (regcache);
-
- fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
-}
-
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
int write, struct mem_attrib *attrib,
@@ -4967,7 +4974,6 @@ setup_target_debug (void)
current_target.to_open = debug_to_open;
current_target.to_post_attach = debug_to_post_attach;
- current_target.to_prepare_to_store = debug_to_prepare_to_store;
current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
current_target.to_files_info = debug_to_files_info;
current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
diff --git a/gdb/target.h b/gdb/target.h
index 319fcc3..e26f2a8 100644
--- a/gdb/target.h
+++ b/gdb/target.h
@@ -432,7 +432,7 @@ struct target_ops
ptid_t, struct target_waitstatus *, int);
void (*to_fetch_registers) (struct target_ops *, struct regcache *, int);
void (*to_store_registers) (struct target_ops *, struct regcache *, int);
- void (*to_prepare_to_store) (struct regcache *);
+ void (*to_prepare_to_store) (struct target_ops *, struct regcache *);
/* Transfer LEN bytes of memory between GDB address MYADDR and
target address MEMADDR. If WRITE, transfer them to the target, else
@@ -1048,8 +1048,7 @@ extern void target_store_registers (struct regcache *regcache, int regs);
that REGISTERS contains all the registers from the program being
debugged. */
-#define target_prepare_to_store(regcache) \
- (*current_target.to_prepare_to_store) (regcache)
+extern void target_prepare_to_store (struct regcache *);
/* Determine current address space of thread PTID. */
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* [PATCH 09/15] record-btrace: supply register target methods
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (4 preceding siblings ...)
2013-05-02 12:03 ` [PATCH 08/15] target: add ops parameter to to_prepare_to_store method Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-02 12:03 ` [PATCH 06/15] record-btrace: make ranges include begin and end Markus Metzger
` (8 subsequent siblings)
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
Supply target methods to allow reading the PC. Forbid anything else.
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* record-btrace.c (record_btrace_fetch_registers,
record_btrace_store_registers,
record_btrace_to_prepare_to_store): New.
(init_record_btrace_ops): Add the above.
---
gdb/record-btrace.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 97 insertions(+), 0 deletions(-)
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 56eccab..2299899 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -32,6 +32,7 @@
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
+#include "regcache.h"
/* The target_ops of record-btrace. */
static struct target_ops record_btrace_ops;
@@ -789,6 +790,99 @@ record_btrace_is_replaying (void)
return 0;
}
+/* The to_fetch_registers method of target record-btrace. */
+
+static void
+record_btrace_fetch_registers (struct target_ops *ops,
+ struct regcache *regcache, int regno)
+{
+ struct btrace_insn_iterator *replay;
+ struct thread_info *tp;
+
+ tp = find_thread_ptid (inferior_ptid);
+ if (tp == NULL)
+ return;
+
+ replay = tp->btrace.replay;
+ if (replay != NULL)
+ {
+ const struct btrace_insn *insn;
+ struct gdbarch *gdbarch;
+ int pcreg;
+
+ gdbarch = get_regcache_arch (regcache);
+ pcreg = gdbarch_pc_regnum (gdbarch);
+ if (pcreg < 0)
+ error (_("Failed to determine PC register number."));
+
+ /* We can only provide the PC register. */
+ if (regno >= 0 && regno != pcreg)
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("This record target does not trace registers."));
+
+ insn = btrace_insn_get (replay);
+ if (insn == NULL)
+ error (_("Failed to determine the current replay position."));
+
+ regcache_raw_supply (regcache, regno, &insn->pc);
+ }
+ else
+ {
+ struct target_ops *t;
+
+ for (t = ops->beneath; t != NULL; t = t->beneath)
+ if (t->to_fetch_registers != NULL)
+ {
+ t->to_fetch_registers (t, regcache, regno);
+ break;
+ }
+ }
+}
+
+/* The to_store_registers method of target record-btrace. */
+
+static void
+record_btrace_store_registers (struct target_ops *ops,
+ struct regcache *regcache, int regno)
+{
+ struct target_ops *t;
+
+ if (record_btrace_is_replaying ())
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("This record target does not trace registers."));
+
+ if (may_write_registers == 0)
+ error (_("Writing to registers is not allowed (regno %d)"), regno);
+
+ for (t = ops->beneath; t != NULL; t = t->beneath)
+ if (t->to_store_registers != NULL)
+ {
+ t->to_store_registers (t, regcache, regno);
+ return;
+ }
+
+ noprocess ();
+}
+
+/* The to_prepare_to_store method of target record-btrace. */
+
+static void
+record_btrace_prepare_to_store (struct target_ops *ops,
+ struct regcache *regcache)
+{
+ struct target_ops *t;
+
+ if (record_btrace_is_replaying ())
+ return;
+
+ for (t = ops->beneath; t != NULL; t = t->beneath)
+ if (t->to_prepare_to_store != NULL)
+ {
+ t->to_prepare_to_store (t, regcache);
+ return;
+ }
+}
+
/* Initialize the record-btrace target ops. */
static void
@@ -816,6 +910,9 @@ init_record_btrace_ops (void)
ops->to_call_history_from = record_btrace_call_history_from;
ops->to_call_history_range = record_btrace_call_history_range;
ops->to_record_is_replaying = record_btrace_is_replaying;
+ ops->to_fetch_registers = record_btrace_fetch_registers;
+ ops->to_store_registers = record_btrace_store_registers;
+ ops->to_prepare_to_store = record_btrace_prepare_to_store;
ops->to_stratum = record_stratum;
ops->to_magic = OPS_MAGIC;
}
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* [PATCH 06/15] record-btrace: make ranges include begin and end
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (5 preceding siblings ...)
2013-05-02 12:03 ` [PATCH 09/15] record-btrace: supply register target methods Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-02 15:51 ` Eli Zaretskii
2013-05-02 12:03 ` [PATCH 14/15] record-btrace: add record goto target methods Markus Metzger
` (7 subsequent siblings)
14 siblings, 1 reply; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches, Eli Zaretskii, Christian Himpel
The "record function-call-history" and "record instruction-history" commands
accept a range "begin, end". End is not included in both cases. Include it.
CC: Eli Zaretskii <eliz@gnu.org>
CC: Christian Himpel <christian.himpel@intel.com>
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* record-btrace.c (record_btrace_insn_history_range): Include
end.
(record_btrace_insn_history_from): Adjust range.
(record_btrace_call_history_range): Include
end.
(record_btrace_call_history_from): Adjust range.
testsuite/
* gdb.btrace/function_call_history.exp: Update tests.
* gdb.btrace/instruction_history.exp: Update tests.
doc/
* gdb.texinfo (Process Record and Replay): Update documentation.
---
gdb/doc/gdb.texinfo | 6 +---
gdb/record-btrace.c | 29 +++++++++++++++-----
gdb/testsuite/gdb.btrace/function_call_history.exp | 4 +-
gdb/testsuite/gdb.btrace/instruction_history.exp | 6 ++--
4 files changed, 29 insertions(+), 16 deletions(-)
diff --git a/gdb/doc/gdb.texinfo b/gdb/doc/gdb.texinfo
index 99af587..8175927 100644
--- a/gdb/doc/gdb.texinfo
+++ b/gdb/doc/gdb.texinfo
@@ -6344,8 +6344,7 @@ Disassembles ten more instructions before the last disassembly.
@item record instruction-history @var{begin} @var{end}
Disassembles instructions beginning with instruction number
-@var{begin} until instruction number @var{end}. The instruction
-number @var{end} is not included.
+@var{begin} until instruction number @var{end}.
@end table
This command may not be available for all recording methods.
@@ -6415,8 +6414,7 @@ Prints ten more functions before the last ten-line print.
@item record function-call-history @var{begin} @var{end}
Prints functions beginning with function number @var{begin} until
-function number @var{end}. The function number @var{end} is not
-included.
+function number @var{end}.
@end table
This command may not be available for all recording methods.
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 9d73286..ebb26ad 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -370,10 +370,17 @@ record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
if (found == 0)
error (_("Range out of bounds."));
- /* Silently truncate the range, if necessary. */
found = btrace_find_insn_by_number (&end, btinfo, high);
if (found == 0)
- btrace_insn_end (&end, btinfo);
+ {
+ /* Silently truncate the range. */
+ btrace_insn_end (&end, btinfo);
+ }
+ else
+ {
+ /* We want both begin and end to be inclusive. */
+ btrace_insn_next (&end, 1);
+ }
btrace_insn_history (uiout, &begin, &end, flags);
btrace_set_insn_history (btinfo, &begin, &end);
@@ -389,6 +396,8 @@ record_btrace_insn_history_from (ULONGEST from, int size, int flags)
ULONGEST begin, end, context;
context = abs (size);
+ if (context == 0)
+ error (_("Bad record instruction-history-size."));
if (size < 0)
{
@@ -397,12 +406,12 @@ record_btrace_insn_history_from (ULONGEST from, int size, int flags)
if (from < context)
begin = 0;
else
- begin = from - context;
+ begin = from - context + 1;
}
else
{
begin = from;
- end = from + context;
+ end = from + context - 1;
/* Check for wrap-around. */
if (end < begin)
@@ -672,8 +681,12 @@ record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
if (begin == NULL)
error (_("Range out of bounds."));
- /* Silently truncate the range, if necessary. */
end = btrace_find_function_by_number (btinfo, high);
+ /* We want both begin and end to be inclusive. */
+ if (end != NULL)
+ end = end->flow.next;
+
+ /* Silently truncate the range, if necessary. */
if (end == NULL)
end = btinfo->end;
@@ -691,6 +704,8 @@ record_btrace_call_history_from (ULONGEST from, int size, int flags)
ULONGEST begin, end, context;
context = abs (size);
+ if (context == 0)
+ error (_("Bad record function-call-history-size."));
if (size < 0)
{
@@ -699,12 +714,12 @@ record_btrace_call_history_from (ULONGEST from, int size, int flags)
if (from < context)
begin = 0;
else
- begin = from - context;
+ begin = from - context + 1;
}
else
{
begin = from;
- end = from + context;
+ end = from + context - 1;
/* Check for wrap-around. */
if (end < begin)
diff --git a/gdb/testsuite/gdb.btrace/function_call_history.exp b/gdb/testsuite/gdb.btrace/function_call_history.exp
index e528625..86f2c21 100644
--- a/gdb/testsuite/gdb.btrace/function_call_history.exp
+++ b/gdb/testsuite/gdb.btrace/function_call_history.exp
@@ -222,9 +222,9 @@ set expected_range "3\tinc\r
9\tinc\r"
# show functions in instruction range
-gdb_test "record function-call-history 3,10" $expected_range "absolute instruction range"
+gdb_test "record function-call-history 3,9" $expected_range "absolute instruction range"
gdb_test "record function-call-history 3,+7" $expected_range "relative positive instruction range"
-gdb_test "record function-call-history 10,-7" $expected_range "relative negative instruction range"
+gdb_test "record function-call-history 9,-7" $expected_range "relative negative instruction range"
# set bp after fib recursion and continue
set bp_location [gdb_get_line_number "bp.2" $testfile.c]
diff --git a/gdb/testsuite/gdb.btrace/instruction_history.exp b/gdb/testsuite/gdb.btrace/instruction_history.exp
index c1a61b7..147c2ef 100644
--- a/gdb/testsuite/gdb.btrace/instruction_history.exp
+++ b/gdb/testsuite/gdb.btrace/instruction_history.exp
@@ -65,7 +65,7 @@ if { $traced != 7 } {
}
# test that we see the expected instructions
-gdb_test "record instruction-history 1,6" "
+gdb_test "record instruction-history 1,5" "
1\t 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>:\tje 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>\r
2\t 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>:\tdec %eax\r
3\t 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>:\tjmp 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>\r
@@ -79,14 +79,14 @@ gdb_test "record instruction-history /f 1,+5" "
4\t 0x\[0-9a-f\]+ <\\+\[0-9\]+>:\tcmp \\\$0x0,%eax\r
5\t 0x\[0-9a-f\]+ <\\+\[0-9\]+>:\tje 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>\r"
-gdb_test "record instruction-history /p 6,-5" "
+gdb_test "record instruction-history /p 5,-5" "
1\t0x\[0-9a-f\]+ <loop\\+\[0-9\]+>:\tje 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>\r
2\t0x\[0-9a-f\]+ <loop\\+\[0-9\]+>:\tdec %eax\r
3\t0x\[0-9a-f\]+ <loop\\+\[0-9\]+>:\tjmp 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>\r
4\t0x\[0-9a-f\]+ <loop\\+\[0-9\]+>:\tcmp \\\$0x0,%eax\r
5\t0x\[0-9a-f\]+ <loop\\+\[0-9\]+>:\tje 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>\r"
-gdb_test "record instruction-history /pf 1,6" "
+gdb_test "record instruction-history /pf 1,5" "
1\t0x\[0-9a-f\]+ <\\+\[0-9\]+>:\tje 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>\r
2\t0x\[0-9a-f\]+ <\\+\[0-9\]+>:\tdec %eax\r
3\t0x\[0-9a-f\]+ <\\+\[0-9\]+>:\tjmp 0x\[0-9a-f\]+ <loop\\+\[0-9\]+>\r
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* [PATCH 14/15] record-btrace: add record goto target methods
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (6 preceding siblings ...)
2013-05-02 12:03 ` [PATCH 06/15] record-btrace: make ranges include begin and end Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-02 17:11 ` Eli Zaretskii
2013-05-02 12:03 ` [PATCH 10/15] frame, backtrace: allow targets to supply a frame unwinder Markus Metzger
` (6 subsequent siblings)
14 siblings, 1 reply; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches, Eli Zaretskii, Christian Himpel
CC: Eli Zaretskii <eliz@gnu.org>
CC: Christian Himpel <christian.himpel@intel.com>
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* record-btrace.c (record_btrace_set_replay,
record_btrace_goto_begin, record_btrace_goto_end,
record_btrace_goto): New.
(init_record_btrace_ops): Initialize them.
* NEWS: Announce it.
testsuite/
* gdb.btrace/Makefile.in (EXECUTABLES): Add record_goto.
* gdb.btrace/record_goto.c: New.
* gdb.btrace/record_goto.exp: New.
* gdb.btrace/x86-record_goto.S: New.
---
gdb/NEWS | 2 +
gdb/record-btrace.c | 91 ++++++++
gdb/testsuite/gdb.btrace/Makefile.in | 2 +-
gdb/testsuite/gdb.btrace/record_goto.c | 51 +++++
gdb/testsuite/gdb.btrace/record_goto.exp | 153 +++++++++++++
gdb/testsuite/gdb.btrace/x86-record_goto.S | 332 ++++++++++++++++++++++++++++
6 files changed, 630 insertions(+), 1 deletions(-)
create mode 100644 gdb/testsuite/gdb.btrace/record_goto.c
create mode 100644 gdb/testsuite/gdb.btrace/record_goto.exp
create mode 100644 gdb/testsuite/gdb.btrace/x86-record_goto.S
diff --git a/gdb/NEWS b/gdb/NEWS
index 2eeb59d..ba17f7d 100644
--- a/gdb/NEWS
+++ b/gdb/NEWS
@@ -3,6 +3,8 @@
*** Changes since GDB 7.6
+* The btrace record target supports the 'record goto' command.
+
* The command 'record function-call-history' supports a new modifier '/c' to
indent the function names based on their call stack depth.
The fields for the '/i' and '/l' modifier have been reordered.
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index a16e7ea..388c2d7 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1047,6 +1047,94 @@ record_btrace_wait (struct target_ops *ops, ptid_t ptid,
return t->to_wait (t, ptid, status, options);
}
+/* Set the replay branch trace instruction iterator. */
+
+static void
+record_btrace_set_replay (struct btrace_thread_info *btinfo,
+ const struct btrace_insn_iterator *it)
+{
+ if (it == NULL || it->function == NULL)
+ {
+ if (btinfo->replay == NULL)
+ return;
+
+ xfree (btinfo->replay);
+ btinfo->replay = NULL;
+ }
+ else
+ {
+ if (btinfo->replay == NULL)
+ btinfo->replay = xzalloc (sizeof (*btinfo->replay));
+ else if (btrace_insn_cmp (btinfo->replay, it) == 0)
+ return;
+
+ *btinfo->replay = *it;
+ }
+
+ /* Clear the function call and instruction histories so we start anew
+ from the new replay position. */
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
+
+ registers_changed ();
+ reinit_frame_cache ();
+ print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC);
+}
+
+/* The to_goto_record_begin method of target record-btrace. */
+
+static void
+record_btrace_goto_begin (void)
+{
+ struct btrace_thread_info *btinfo;
+ struct btrace_insn_iterator begin;
+
+ btinfo = require_btrace ();
+
+ btrace_insn_begin (&begin, btinfo);
+ record_btrace_set_replay (btinfo, &begin);
+}
+
+/* The to_goto_record_end method of target record-btrace. */
+
+static void
+record_btrace_goto_end (void)
+{
+ struct btrace_thread_info *btinfo;
+
+ btinfo = require_btrace ();
+
+ record_btrace_set_replay (btinfo, NULL);
+}
+
+/* The to_goto_record method of target record-btrace. */
+
+static void
+record_btrace_goto (ULONGEST insn)
+{
+ struct btrace_thread_info *btinfo;
+ struct btrace_insn_iterator it;
+ unsigned int number;
+ int found;
+
+ number = (unsigned int) insn;
+
+ /* Check for wrap-arounds. */
+ if (number != insn)
+ error (_("Instruction number out of range."));
+
+ btinfo = require_btrace ();
+
+ found = btrace_find_insn_by_number (&it, btinfo, number);
+ if (found == 0)
+ error (_("No such instruction."));
+
+ record_btrace_set_replay (btinfo, &it);
+}
+
/* Initialize the record-btrace target ops. */
static void
@@ -1081,6 +1169,9 @@ init_record_btrace_ops (void)
ops->to_get_unwinder = &record_btrace_frame_unwind;
ops->to_resume = record_btrace_resume;
ops->to_wait = record_btrace_wait;
+ ops->to_goto_record_begin = record_btrace_goto_begin;
+ ops->to_goto_record_end = record_btrace_goto_end;
+ ops->to_goto_record = record_btrace_goto;
ops->to_stratum = record_stratum;
ops->to_magic = OPS_MAGIC;
}
diff --git a/gdb/testsuite/gdb.btrace/Makefile.in b/gdb/testsuite/gdb.btrace/Makefile.in
index 5c70700..aa2820a 100644
--- a/gdb/testsuite/gdb.btrace/Makefile.in
+++ b/gdb/testsuite/gdb.btrace/Makefile.in
@@ -2,7 +2,7 @@ VPATH = @srcdir@
srcdir = @srcdir@
EXECUTABLES = enable function_call_history instruction_history tailcall \
- exception
+ exception record_goto
MISCELLANEOUS =
diff --git a/gdb/testsuite/gdb.btrace/record_goto.c b/gdb/testsuite/gdb.btrace/record_goto.c
new file mode 100644
index 0000000..1250708
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/record_goto.c
@@ -0,0 +1,51 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+ Copyright 2013 Free Software Foundation, Inc.
+
+ Contributed by Intel Corp. <markus.t.metzger@intel.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+void
+fun1 (void)
+{
+}
+
+void
+fun2 (void)
+{
+ fun1 ();
+}
+
+void
+fun3 (void)
+{
+ fun1 ();
+ fun2 ();
+}
+
+void
+fun4 (void)
+{
+ fun1 ();
+ fun2 ();
+ fun3 ();
+}
+
+int
+main (void)
+{
+ fun4 ();
+ return 0;
+}
diff --git a/gdb/testsuite/gdb.btrace/record_goto.exp b/gdb/testsuite/gdb.btrace/record_goto.exp
new file mode 100644
index 0000000..008d956
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/record_goto.exp
@@ -0,0 +1,153 @@
+# This testcase is part of GDB, the GNU debugger.
+#
+# Copyright 2013 Free Software Foundation, Inc.
+#
+# Contributed by Intel Corp. <markus.t.metzger@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# check for btrace support
+if { [skip_btrace_tests] } { return -1 }
+
+# start inferior
+standard_testfile x86-record_goto.S
+if [prepare_for_testing record_goto.exp $testfile $srcfile] {
+ return -1
+}
+if ![runto_main] {
+ return -1
+}
+
+# we want small context sizes to simplify the test
+gdb_test_no_output "set record instruction-history-size 3"
+gdb_test_no_output "set record function-call-history-size 3"
+
+# trace the call to the test function
+gdb_test_no_output "record btrace"
+gdb_test "next"
+
+# start by listing all functions
+gdb_test "record function-call-history /ci 0, +20" "
+0\t fun4\tinst 0,2\r
+1\t fun1\tinst 3,6\r
+2\t fun4\tinst 7,7\r
+3\t fun2\tinst 8,10\r
+4\t fun1\tinst 11,14\r
+5\t fun2\tinst 15,16\r
+6\t fun4\tinst 17,17\r
+7\t fun3\tinst 18,20\r
+8\t fun1\tinst 21,24\r
+9\t fun3\tinst 25,25\r
+10\t fun2\tinst 26,28\r
+11\t fun1\tinst 29,32\r
+12\t fun2\tinst 33,34\r
+13\t fun3\tinst 35,36\r
+14\t fun4\tinst 37,38\r
+15\tmain\tinst 39,39" "record_goto - list all functions"
+
+# let's see if we can go back in history
+gdb_test "record goto 17" "
+.*fun4 \\(\\) at record_goto.c:43.*" "record_goto - goto 17"
+
+# the function call history should start at the new location
+gdb_test "record function-call-history /ci" "
+6\t fun4\tinst 17,17\r
+7\t fun3\tinst 18,20\r
+8\t fun1\tinst 21,24" "record_goto - function-call-history from 17 forwards"
+
+# the instruction history should start at the new location
+gdb_test "record instruction-history" "
+17.*\r
+18.*\r
+19.*\r" "record_goto - instruction-history from 17 forwards"
+
+# let's go to another place in the history
+gdb_test "record goto 25" "
+.*fun3 \\(\\) at record_goto.c:35.*" "record_goto - goto 25"
+
+# the function call history should start at the new location
+gdb_test "record function-call-history /ci -" "
+7\t fun3\tinst 18,20\r
+8\t fun1\tinst 21,24\r
+9\t fun3\tinst 25,25" "record_goto - function-call-history from 25 backwards"
+
+# the instruction history should start at the new location
+gdb_test "record instruction-history -" "
+23.*\r
+24.*\r
+25.*\r" "record_goto - instruction-history from 25 backwards"
+
+# test that we can go to the begin of the trace
+gdb_test "record goto begin" "
+.*fun4 \\(\\) at record_goto.c:40.*" "record_goto - goto begin"
+
+# check that we're filling up the context correctly
+gdb_test "record function-call-history /ci -" "
+0\t fun4\tinst 0,2\r
+1\t fun1\tinst 3,6\r
+2\t fun4\tinst 7,7" "record_goto - function-call-history from begin backwards"
+
+# check that we're filling up the context correctly
+gdb_test "record instruction-history -" "
+0.*\r
+1.*\r
+2.*\r" "record_goto - instruction-history from begin backwards"
+
+# we should get the exact same history from the first instruction
+gdb_test "record goto 1" "
+.*fun4 \\(\\) at record_goto.c:40.*" "record_goto - goto 1"
+
+# check that we're filling up the context correctly
+gdb_test "record function-call-history /ci -" "
+0\t fun4\tinst 0,2\r
+1\t fun1\tinst 3,6\r
+2\t fun4\tinst 7,7" "record_goto - function-call-history from 1 backwards"
+
+# check that we're filling up the context correctly
+gdb_test "record instruction-history -" "
+0.*\r
+1.*\r
+2.*\r" "record_goto - instruction-history from 1 backwards"
+
+# check that we can go to the end of the trace
+gdb_test "record goto end" "
+.*main \\(\\) at record_goto.c:50.*" "record_goto - goto end"
+
+# check that we're filling up the context correctly
+gdb_test "record function-call-history /ci" "
+13\t fun3\tinst 35,36\r
+14\t fun4\tinst 37,38\r
+15\tmain\tinst 39,39" "record_goto - function-call-history from end forwards"
+
+# check that we're filling up the context correctly
+gdb_test "record instruction-history" "
+37.*\r
+38.*\r
+39.*\r" "record_goto - instruction-history from end forwards"
+
+# we should get the exact same history from the second to last instruction
+gdb_test "record goto 38" "
+.*fun4 \\(\\) at record_goto.c:44.*" "record_goto - goto 38"
+
+# check that we're filling up the context correctly
+gdb_test "record function-call-history /ci" "
+13\t fun3\tinst 35,36\r
+14\t fun4\tinst 37,38\r
+15\tmain\tinst 39,39" "record_goto - function-call-history from 38 forwards"
+
+# check that we're filling up the context correctly
+gdb_test "record instruction-history" "
+37.*\r
+38.*\r
+39.*\r" "record_goto - instruction-history from 38 forwards"
diff --git a/gdb/testsuite/gdb.btrace/x86-record_goto.S b/gdb/testsuite/gdb.btrace/x86-record_goto.S
new file mode 100644
index 0000000..d2e6621
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/x86-record_goto.S
@@ -0,0 +1,332 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+ Copyright 2013 Free Software Foundation, Inc.
+
+ Contributed by Intel Corp. <markus.t.metzger@intel.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+ This file has been generated using:
+ gcc -S -g record_goto.c -o x86-record_goto.S */
+
+ .file "record_goto.c"
+ .section .debug_abbrev,"",@progbits
+.Ldebug_abbrev0:
+ .section .debug_info,"",@progbits
+.Ldebug_info0:
+ .section .debug_line,"",@progbits
+.Ldebug_line0:
+ .text
+.Ltext0:
+.globl fun1
+ .type fun1, @function
+fun1:
+.LFB0:
+ .file 1 "record_goto.c"
+ .loc 1 22 0
+ .cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ movq %rsp, %rbp
+ .cfi_offset 6, -16
+ .cfi_def_cfa_register 6
+ .loc 1 23 0
+ leave
+ .cfi_def_cfa 7, 8
+ ret
+ .cfi_endproc
+.LFE0:
+ .size fun1, .-fun1
+.globl fun2
+ .type fun2, @function
+fun2:
+.LFB1:
+ .loc 1 27 0
+ .cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ movq %rsp, %rbp
+ .cfi_offset 6, -16
+ .cfi_def_cfa_register 6
+ .loc 1 28 0
+ call fun1
+ .loc 1 29 0
+ leave
+ .cfi_def_cfa 7, 8
+ ret
+ .cfi_endproc
+.LFE1:
+ .size fun2, .-fun2
+.globl fun3
+ .type fun3, @function
+fun3:
+.LFB2:
+ .loc 1 33 0
+ .cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ movq %rsp, %rbp
+ .cfi_offset 6, -16
+ .cfi_def_cfa_register 6
+ .loc 1 34 0
+ call fun1
+ .loc 1 35 0
+ call fun2
+ .loc 1 36 0
+ leave
+ .cfi_def_cfa 7, 8
+ ret
+ .cfi_endproc
+.LFE2:
+ .size fun3, .-fun3
+.globl fun4
+ .type fun4, @function
+fun4:
+.LFB3:
+ .loc 1 40 0
+ .cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ movq %rsp, %rbp
+ .cfi_offset 6, -16
+ .cfi_def_cfa_register 6
+ .loc 1 41 0
+ call fun1
+ .loc 1 42 0
+ call fun2
+ .loc 1 43 0
+ call fun3
+ .loc 1 44 0
+ leave
+ .cfi_def_cfa 7, 8
+ ret
+ .cfi_endproc
+.LFE3:
+ .size fun4, .-fun4
+.globl main
+ .type main, @function
+main:
+.LFB4:
+ .loc 1 48 0
+ .cfi_startproc
+ pushq %rbp
+ .cfi_def_cfa_offset 16
+ movq %rsp, %rbp
+ .cfi_offset 6, -16
+ .cfi_def_cfa_register 6
+ .loc 1 49 0
+ call fun4
+ .loc 1 50 0
+ movl $0, %eax
+ .loc 1 51 0
+ leave
+ .cfi_def_cfa 7, 8
+ ret
+ .cfi_endproc
+.LFE4:
+ .size main, .-main
+.Letext0:
+ .section .debug_info
+ .long 0xbc
+ .value 0x3
+ .long .Ldebug_abbrev0
+ .byte 0x8
+ .uleb128 0x1
+ .long .LASF4
+ .byte 0x1
+ .long .LASF5
+ .long .LASF6
+ .quad .Ltext0
+ .quad .Letext0
+ .long .Ldebug_line0
+ .uleb128 0x2
+ .byte 0x1
+ .long .LASF0
+ .byte 0x1
+ .byte 0x15
+ .byte 0x1
+ .quad .LFB0
+ .quad .LFE0
+ .byte 0x1
+ .byte 0x9c
+ .uleb128 0x2
+ .byte 0x1
+ .long .LASF1
+ .byte 0x1
+ .byte 0x1a
+ .byte 0x1
+ .quad .LFB1
+ .quad .LFE1
+ .byte 0x1
+ .byte 0x9c
+ .uleb128 0x2
+ .byte 0x1
+ .long .LASF2
+ .byte 0x1
+ .byte 0x20
+ .byte 0x1
+ .quad .LFB2
+ .quad .LFE2
+ .byte 0x1
+ .byte 0x9c
+ .uleb128 0x2
+ .byte 0x1
+ .long .LASF3
+ .byte 0x1
+ .byte 0x27
+ .byte 0x1
+ .quad .LFB3
+ .quad .LFE3
+ .byte 0x1
+ .byte 0x9c
+ .uleb128 0x3
+ .byte 0x1
+ .long .LASF7
+ .byte 0x1
+ .byte 0x2f
+ .byte 0x1
+ .long 0xb8
+ .quad .LFB4
+ .quad .LFE4
+ .byte 0x1
+ .byte 0x9c
+ .uleb128 0x4
+ .byte 0x4
+ .byte 0x5
+ .string "int"
+ .byte 0x0
+ .section .debug_abbrev
+ .uleb128 0x1
+ .uleb128 0x11
+ .byte 0x1
+ .uleb128 0x25
+ .uleb128 0xe
+ .uleb128 0x13
+ .uleb128 0xb
+ .uleb128 0x3
+ .uleb128 0xe
+ .uleb128 0x1b
+ .uleb128 0xe
+ .uleb128 0x11
+ .uleb128 0x1
+ .uleb128 0x12
+ .uleb128 0x1
+ .uleb128 0x10
+ .uleb128 0x6
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x2
+ .uleb128 0x2e
+ .byte 0x0
+ .uleb128 0x3f
+ .uleb128 0xc
+ .uleb128 0x3
+ .uleb128 0xe
+ .uleb128 0x3a
+ .uleb128 0xb
+ .uleb128 0x3b
+ .uleb128 0xb
+ .uleb128 0x27
+ .uleb128 0xc
+ .uleb128 0x11
+ .uleb128 0x1
+ .uleb128 0x12
+ .uleb128 0x1
+ .uleb128 0x40
+ .uleb128 0xa
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x3
+ .uleb128 0x2e
+ .byte 0x0
+ .uleb128 0x3f
+ .uleb128 0xc
+ .uleb128 0x3
+ .uleb128 0xe
+ .uleb128 0x3a
+ .uleb128 0xb
+ .uleb128 0x3b
+ .uleb128 0xb
+ .uleb128 0x27
+ .uleb128 0xc
+ .uleb128 0x49
+ .uleb128 0x13
+ .uleb128 0x11
+ .uleb128 0x1
+ .uleb128 0x12
+ .uleb128 0x1
+ .uleb128 0x40
+ .uleb128 0xa
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x4
+ .uleb128 0x24
+ .byte 0x0
+ .uleb128 0xb
+ .uleb128 0xb
+ .uleb128 0x3e
+ .uleb128 0xb
+ .uleb128 0x3
+ .uleb128 0x8
+ .byte 0x0
+ .byte 0x0
+ .byte 0x0
+ .section .debug_pubnames,"",@progbits
+ .long 0x3b
+ .value 0x2
+ .long .Ldebug_info0
+ .long 0xc0
+ .long 0x2d
+ .string "fun1"
+ .long 0x48
+ .string "fun2"
+ .long 0x63
+ .string "fun3"
+ .long 0x7e
+ .string "fun4"
+ .long 0x99
+ .string "main"
+ .long 0x0
+ .section .debug_aranges,"",@progbits
+ .long 0x2c
+ .value 0x2
+ .long .Ldebug_info0
+ .byte 0x8
+ .byte 0x0
+ .value 0x0
+ .value 0x0
+ .quad .Ltext0
+ .quad .Letext0-.Ltext0
+ .quad 0x0
+ .quad 0x0
+ .section .debug_str,"MS",@progbits,1
+.LASF3:
+ .string "fun4"
+.LASF5:
+ .string "record_goto.c"
+.LASF4:
+ .string "GNU C 4.4.4 20100726 (Red Hat 4.4.4-13)"
+.LASF7:
+ .string "main"
+.LASF1:
+ .string "fun2"
+.LASF0:
+ .string "fun1"
+.LASF6:
+ .string "/users/mmetzger/gdb/gerrit/git/gdb/testsuite/gdb.btrace"
+.LASF2:
+ .string "fun3"
+ .ident "GCC: (GNU) 4.4.4 20100726 (Red Hat 4.4.4-13)"
+ .section .note.GNU-stack,"",@progbits
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* Re: [PATCH 14/15] record-btrace: add record goto target methods
2013-05-02 12:03 ` [PATCH 14/15] record-btrace: add record goto target methods Markus Metzger
@ 2013-05-02 17:11 ` Eli Zaretskii
0 siblings, 0 replies; 24+ messages in thread
From: Eli Zaretskii @ 2013-05-02 17:11 UTC (permalink / raw)
To: Markus Metzger; +Cc: jan.kratochvil, gdb-patches, christian.himpel
> From: Markus Metzger <markus.t.metzger@intel.com>
> Cc: gdb-patches@sourceware.org, Eli Zaretskii <eliz@gnu.org>, Christian Himpel <christian.himpel@intel.com>
> Date: Thu, 2 May 2013 14:03:35 +0200
>
> CC: Eli Zaretskii <eliz@gnu.org>
> CC: Christian Himpel <christian.himpel@intel.com>
> 2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
>
> * record-btrace.c (record_btrace_set_replay,
> record_btrace_goto_begin, record_btrace_goto_end,
> record_btrace_goto): New.
> (init_record_btrace_ops): Initialize them.
> * NEWS: Announce it.
OK for the NEWS part.
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH 10/15] frame, backtrace: allow targets to supply a frame unwinder
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (7 preceding siblings ...)
2013-05-02 12:03 ` [PATCH 14/15] record-btrace: add record goto target methods Markus Metzger
@ 2013-05-02 12:03 ` Markus Metzger
2013-05-02 12:04 ` [PATCH 02/15] btrace: change branch trace data structure Markus Metzger
` (5 subsequent siblings)
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:03 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
Allow targets to supply their own target-specific frame unwinder. If a
target-specific unwinder is supplied, it will be chosen before any other
unwinder.
gdb/
2013-02-11 Jan Kratochvil <jan.kratochvil@redhat.com>
* dwarf2-frame.c (dwarf2_frame_cfa): Move UNWIND_UNAVAILABLE check
earlier.
* frame-unwind.c: Include target.h.
(frame_unwind_try_unwinder): New function with code from ...
(frame_unwind_find_by_frame): ... here. New variable
unwinder_from_target, call also target_get_unwinder and
frame_unwind_try_unwinder for it.
* frame.c (get_frame_unwind_stop_reason): Unconditionally call
get_prev_frame_1.
* target.c (target_get_unwinder): New.
* target.h (struct target_ops): New field to_get_unwinder.
(target_get_unwinder): New declaration.
---
gdb/dwarf2-frame.c | 8 ++--
gdb/frame-unwind.c | 80 +++++++++++++++++++++++++++++++++------------------
gdb/frame.c | 9 ++----
gdb/target.c | 14 +++++++++
gdb/target.h | 7 ++++
5 files changed, 80 insertions(+), 38 deletions(-)
diff --git a/gdb/dwarf2-frame.c b/gdb/dwarf2-frame.c
index 5c88b03..2aff23e 100644
--- a/gdb/dwarf2-frame.c
+++ b/gdb/dwarf2-frame.c
@@ -1497,16 +1497,16 @@ dwarf2_frame_cfa (struct frame_info *this_frame)
{
while (get_frame_type (this_frame) == INLINE_FRAME)
this_frame = get_prev_frame (this_frame);
+ if (get_frame_unwind_stop_reason (this_frame) == UNWIND_UNAVAILABLE)
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("can't compute CFA for this frame: "
+ "required registers or memory are unavailable"));
/* This restriction could be lifted if other unwinders are known to
compute the frame base in a way compatible with the DWARF
unwinder. */
if (!frame_unwinder_is (this_frame, &dwarf2_frame_unwind)
&& !frame_unwinder_is (this_frame, &dwarf2_tailcall_frame_unwind))
error (_("can't compute CFA for this frame"));
- if (get_frame_unwind_stop_reason (this_frame) == UNWIND_UNAVAILABLE)
- throw_error (NOT_AVAILABLE_ERROR,
- _("can't compute CFA for this frame: "
- "required registers or memory are unavailable"));
return get_frame_base (this_frame);
}
\f
diff --git a/gdb/frame-unwind.c b/gdb/frame-unwind.c
index b66febf..fe5f8fb 100644
--- a/gdb/frame-unwind.c
+++ b/gdb/frame-unwind.c
@@ -27,6 +27,7 @@
#include "exceptions.h"
#include "gdb_assert.h"
#include "gdb_obstack.h"
+#include "target.h"
static struct gdbarch_data *frame_unwind_data;
@@ -88,6 +89,48 @@ frame_unwind_append_unwinder (struct gdbarch *gdbarch,
(*ip)->unwinder = unwinder;
}
+/* Call SNIFFER from UNWINDER. If it succeeded set UNWINDER for
+ THIS_FRAME and return 1. Otherwise the function keeps THIS_FRAME
+ unchanged and returns 0. */
+
+static int
+frame_unwind_try_unwinder (struct frame_info *this_frame, void **this_cache,
+ const struct frame_unwind *unwinder)
+{
+ struct cleanup *old_cleanup;
+ volatile struct gdb_exception ex;
+ int res = 0;
+
+ old_cleanup = frame_prepare_for_sniffer (this_frame, unwinder);
+
+ TRY_CATCH (ex, RETURN_MASK_ERROR)
+ {
+ res = unwinder->sniffer (unwinder, this_frame, this_cache);
+ }
+ if (ex.reason < 0 && ex.error == NOT_AVAILABLE_ERROR)
+ {
+ /* This usually means that not even the PC is available,
+ thus most unwinders aren't able to determine if they're
+ the best fit. Keep trying. Fallback prologue unwinders
+ should always accept the frame. */
+ do_cleanups (old_cleanup);
+ return 0;
+ }
+ else if (ex.reason < 0)
+ throw_exception (ex);
+ else if (res)
+ {
+ discard_cleanups (old_cleanup);
+ return 1;
+ }
+ else
+ {
+ do_cleanups (old_cleanup);
+ return 0;
+ }
+ gdb_assert_not_reached ("frame_unwind_try_unwinder");
+}
+
/* Iterate through sniffers for THIS_FRAME frame until one returns with an
unwinder implementation. THIS_FRAME->UNWIND must be NULL, it will get set
by this function. Possibly initialize THIS_CACHE. */
@@ -98,37 +141,18 @@ frame_unwind_find_by_frame (struct frame_info *this_frame, void **this_cache)
struct gdbarch *gdbarch = get_frame_arch (this_frame);
struct frame_unwind_table *table = gdbarch_data (gdbarch, frame_unwind_data);
struct frame_unwind_table_entry *entry;
+ const struct frame_unwind *unwinder_from_target;
+
+ unwinder_from_target = target_get_unwinder ();
+ if (unwinder_from_target != NULL
+ && frame_unwind_try_unwinder (this_frame, this_cache,
+ unwinder_from_target))
+ return;
for (entry = table->list; entry != NULL; entry = entry->next)
- {
- struct cleanup *old_cleanup;
- volatile struct gdb_exception ex;
- int res = 0;
-
- old_cleanup = frame_prepare_for_sniffer (this_frame, entry->unwinder);
-
- TRY_CATCH (ex, RETURN_MASK_ERROR)
- {
- res = entry->unwinder->sniffer (entry->unwinder, this_frame,
- this_cache);
- }
- if (ex.reason < 0 && ex.error == NOT_AVAILABLE_ERROR)
- {
- /* This usually means that not even the PC is available,
- thus most unwinders aren't able to determine if they're
- the best fit. Keep trying. Fallback prologue unwinders
- should always accept the frame. */
- }
- else if (ex.reason < 0)
- throw_exception (ex);
- else if (res)
- {
- discard_cleanups (old_cleanup);
- return;
- }
+ if (frame_unwind_try_unwinder (this_frame, this_cache, entry->unwinder))
+ return;
- do_cleanups (old_cleanup);
- }
internal_error (__FILE__, __LINE__, _("frame_unwind_find_by_frame failed"));
}
diff --git a/gdb/frame.c b/gdb/frame.c
index 8d4e2c8..fa59bee 100644
--- a/gdb/frame.c
+++ b/gdb/frame.c
@@ -2357,13 +2357,10 @@ get_frame_sp (struct frame_info *this_frame)
enum unwind_stop_reason
get_frame_unwind_stop_reason (struct frame_info *frame)
{
- /* If we haven't tried to unwind past this point yet, then assume
- that unwinding would succeed. */
- if (frame->prev_p == 0)
- return UNWIND_NO_REASON;
+ /* Fill-in STOP_REASON. */
+ get_prev_frame_1 (frame);
+ gdb_assert (frame->prev_p);
- /* Otherwise, we set a reason when we succeeded (or failed) to
- unwind. */
return frame->stop_reason;
}
diff --git a/gdb/target.c b/gdb/target.c
index 69073d6..cb4f5d9 100644
--- a/gdb/target.c
+++ b/gdb/target.c
@@ -4523,6 +4523,20 @@ target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
tcomplain ();
}
+/* See target.h. */
+
+const struct frame_unwind *
+target_get_unwinder (void)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ if (t->to_get_unwinder != NULL)
+ return t->to_get_unwinder;
+
+ return NULL;
+}
+
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
int write, struct mem_attrib *attrib,
diff --git a/gdb/target.h b/gdb/target.h
index e26f2a8..e9dade9 100644
--- a/gdb/target.h
+++ b/gdb/target.h
@@ -938,6 +938,10 @@ struct target_ops
(inclusive) to function END (exclusive). */
void (*to_call_history_range) (ULONGEST begin, ULONGEST end, int flags);
+ /* This unwinder is tried before any other arch unwinders. Use NULL if it
+ is not used. */
+ const struct frame_unwind *to_get_unwinder;
+
int to_magic;
/* Need sub-structure for target machine related rather than comm related?
*/
@@ -1816,6 +1820,9 @@ extern char *target_fileio_read_stralloc (const char *filename);
extern int target_core_of_thread (ptid_t ptid);
+/* See to_get_unwinder in struct target_ops. */
+extern const struct frame_unwind *target_get_unwinder (void);
+
/* Verify that the memory in the [MEMADDR, MEMADDR+SIZE) range matches
the contents of [DATA,DATA+SIZE). Returns 1 if there's a match, 0
if there's a mismatch, and -1 if an error is encountered while
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* [PATCH 02/15] btrace: change branch trace data structure
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (8 preceding siblings ...)
2013-05-02 12:03 ` [PATCH 10/15] frame, backtrace: allow targets to supply a frame unwinder Markus Metzger
@ 2013-05-02 12:04 ` Markus Metzger
2013-05-13 15:25 ` Jan Kratochvil
2013-05-02 12:04 ` [PATCH 15/15] record-btrace: extend unwinder Markus Metzger
` (4 subsequent siblings)
14 siblings, 1 reply; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:04 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches, Christian Himpel
The branch trace is represented as 3 vectors:
- a block vector
- an instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
CC: Christian Himpel <christian.himpel@intel.com>
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset,
number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level,
insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin,
btrace_insn_end, btrace_insn_prev, btrace_insn_next,
btrace_insn_cmp, btrace_find_insn_by_number,
btrace_find_function_by_number, btrace_set_insn_history,
btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator,
btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information.
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return,
ftrace_new_switch, ftrace_find_caller, ftrace_new_function,
ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin,
btrace_insn_end, btrace_insn_prev, btrace_insn_next,
btrace_insn_cmp, btrace_find_insn_by_number,
btrace_find_function_by_number, btrace_set_insn_history,
btrace_set_call_history): New.
* record-btrace.c (require_btrace, record_btrace_info,
btrace_insn_history, record_btrace_insn_history,
record_btrace_insn_history_range): Use new btrace thread
info fields.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
---
gdb/btrace.c | 965 ++++++++++++++++----
gdb/btrace.h | 167 +++-
gdb/record-btrace.c | 379 +++++----
gdb/testsuite/gdb.btrace/function_call_history.exp | 28 +-
4 files changed, 1161 insertions(+), 378 deletions(-)
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 3230a3e..9478350 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -45,92 +45,11 @@
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
-/* Initialize the instruction iterator. */
-
-static void
-btrace_init_insn_iterator (struct btrace_thread_info *btinfo)
-{
- DEBUG ("init insn iterator");
-
- btinfo->insn_iterator.begin = 1;
- btinfo->insn_iterator.end = 0;
-}
-
-/* Initialize the function iterator. */
-
-static void
-btrace_init_func_iterator (struct btrace_thread_info *btinfo)
-{
- DEBUG ("init func iterator");
-
- btinfo->func_iterator.begin = 1;
- btinfo->func_iterator.end = 0;
-}
-
-/* Compute the instruction trace from the block trace. */
-
-static VEC (btrace_inst_s) *
-compute_itrace (VEC (btrace_block_s) *btrace)
-{
- VEC (btrace_inst_s) *itrace;
- struct gdbarch *gdbarch;
- unsigned int b;
-
- DEBUG ("compute itrace");
-
- itrace = NULL;
- gdbarch = target_gdbarch ();
- b = VEC_length (btrace_block_s, btrace);
-
- while (b-- != 0)
- {
- btrace_block_s *block;
- CORE_ADDR pc;
-
- block = VEC_index (btrace_block_s, btrace, b);
- pc = block->begin;
-
- /* Add instructions for this block. */
- for (;;)
- {
- btrace_inst_s *inst;
- int size;
-
- /* We should hit the end of the block. Warn if we went too far. */
- if (block->end < pc)
- {
- warning (_("Recorded trace may be corrupted."));
- break;
- }
-
- inst = VEC_safe_push (btrace_inst_s, itrace, NULL);
- inst->pc = pc;
-
- /* We're done once we pushed the instruction at the end. */
- if (block->end == pc)
- break;
-
- size = gdb_insn_length (gdbarch, pc);
-
- /* Make sure we terminate if we fail to compute the size. */
- if (size <= 0)
- {
- warning (_("Recorded trace may be incomplete."));
- break;
- }
-
- pc += size;
- }
- }
-
- return itrace;
-}
-
/* Return the function name of a recorded function segment for printing.
This function never returns NULL. */
static const char *
-ftrace_print_function_name (struct btrace_func *bfun)
+ftrace_print_function_name (const struct btrace_function *bfun)
{
struct minimal_symbol *msym;
struct symbol *sym;
@@ -151,7 +70,7 @@ ftrace_print_function_name (struct btrace_func *bfun)
This function never returns NULL. */
static const char *
-ftrace_print_filename (struct btrace_func *bfun)
+ftrace_print_filename (const struct btrace_function *bfun)
{
struct symbol *sym;
const char *filename;
@@ -166,44 +85,52 @@ ftrace_print_filename (struct btrace_func *bfun)
return filename;
}
-/* Print an ftrace debug status message. */
+/* Print the address of an instruction.
+ This function never returns NULL. */
-static void
-ftrace_debug (struct btrace_func *bfun, const char *prefix)
+static const char *
+ftrace_print_insn_addr (const struct btrace_insn *insn)
{
- DEBUG_FTRACE ("%s: fun = %s, file = %s, lines = [%d; %d], insn = [%u; %u]",
- prefix, ftrace_print_function_name (bfun),
- ftrace_print_filename (bfun), bfun->lbegin, bfun->lend,
- bfun->ibegin, bfun->iend);
+ if (insn == NULL)
+ return "<nil>";
+
+ return core_addr_to_string_nz (insn->pc);
}
-/* Initialize a recorded function segment. */
+/* Print an ftrace debug status message. */
static void
-ftrace_init_func (struct btrace_func *bfun, struct minimal_symbol *mfun,
- struct symbol *fun, unsigned int idx)
+ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
- bfun->msym = mfun;
- bfun->sym = fun;
- bfun->lbegin = INT_MAX;
- bfun->lend = 0;
- bfun->ibegin = idx;
- bfun->iend = idx;
+ const char *fun, *file;
+ unsigned int ibegin, iend;
+ int lbegin, lend, level;
+
+ fun = ftrace_print_function_name (bfun);
+ file = ftrace_print_filename (bfun);
+ level = bfun->level;
+
+ lbegin = bfun->lbegin;
+ lend = bfun->lend;
+
+ ibegin = bfun->insn_offset;
+ iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
+
+ DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
+ "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
+ ibegin, iend);
}
/* Check whether the function has changed. */
static int
-ftrace_function_switched (struct btrace_func *bfun,
- struct minimal_symbol *mfun, struct symbol *fun)
+ftrace_function_switched (const struct btrace_function *bfun,
+ const struct minimal_symbol *mfun,
+ const struct symbol *fun)
{
struct minimal_symbol *msym;
struct symbol *sym;
- /* The function changed if we did not have one before. */
- if (bfun == NULL)
- return 1;
-
msym = bfun->msym;
sym = bfun->sym;
@@ -228,6 +155,14 @@ ftrace_function_switched (struct btrace_func *bfun,
return 1;
}
+ /* If we lost symbol information, we switched functions. */
+ if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
+ return 1;
+
+ /* If we gained symbol information, we switched functions. */
+ if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
+ return 1;
+
return 0;
}
@@ -236,7 +171,7 @@ ftrace_function_switched (struct btrace_func *bfun,
in another file is expanded in this function. */
static int
-ftrace_skip_file (struct btrace_func *bfun, const char *filename)
+ftrace_skip_file (const struct btrace_function *bfun, const char *filename)
{
struct symbol *sym;
const char *bfile;
@@ -254,83 +189,458 @@ ftrace_skip_file (struct btrace_func *bfun, const char *filename)
return (filename_cmp (bfile, filename) != 0);
}
-/* Compute the function trace from the instruction trace. */
+/* Allocate and initialize a new branch trace function segment. */
-static VEC (btrace_func_s) *
-compute_ftrace (VEC (btrace_inst_s) *itrace)
+static struct btrace_function *
+ftrace_new_function (struct btrace_function *prev,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
{
- VEC (btrace_func_s) *ftrace;
- struct btrace_inst *binst;
- struct btrace_func *bfun;
- unsigned int idx;
+ struct btrace_function *bfun;
- DEBUG ("compute ftrace");
+ bfun = xzalloc (sizeof (*bfun));
- ftrace = NULL;
- bfun = NULL;
+ bfun->msym = mfun;
+ bfun->sym = fun;
+ bfun->lbegin = INT_MAX;
+ bfun->flow.prev = prev;
- for (idx = 0; VEC_iterate (btrace_inst_s, itrace, idx, binst); ++idx)
+ if (prev != NULL)
{
- struct symtab_and_line sal;
- struct bound_minimal_symbol mfun;
- struct symbol *fun;
- const char *filename;
+ gdb_assert (prev->flow.next == NULL);
+ prev->flow.next = bfun;
+
+ bfun->number = prev->number + 1;
+ bfun->insn_offset = prev->insn_offset
+ + VEC_length (btrace_insn_s, prev->insn);
+ }
+
+ return bfun;
+}
+
+/* Update the UP field of a function segment. */
+
+static void
+ftrace_update_caller (struct btrace_function *bfun,
+ struct btrace_function *caller)
+{
+ if (bfun->up != NULL)
+ ftrace_debug (bfun, "updating caller");
+
+ bfun->up = caller;
+
+ ftrace_debug (bfun, "set caller");
+}
+
+/* Fix up the caller for a function segment. */
+
+static void
+ftrace_fixup_caller (struct btrace_function *bfun,
+ struct btrace_function *caller)
+{
+ struct btrace_function *prev, *next;
+
+ ftrace_update_caller (bfun, caller);
+
+ /* Update all function segments belonging to the same function. */
+ for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
+ ftrace_update_caller (prev, caller);
+
+ for (next = bfun->segment.next; next != NULL; next = next->segment.next)
+ ftrace_update_caller (next, caller);
+}
+
+/* Add a new function segment for a call. */
+
+static struct btrace_function *
+ftrace_new_call (struct btrace_function *caller,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ struct btrace_function *bfun;
+
+ bfun = ftrace_new_function (caller, mfun, fun);
+ bfun->up = caller;
+ bfun->level = caller->level + 1;
+
+ ftrace_debug (bfun, "new call");
+
+ return bfun;
+}
+
+/* Add a new function segment for a tail call. */
+
+static struct btrace_function *
+ftrace_new_tailcall (struct btrace_function *caller,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ struct btrace_function *bfun;
+
+ bfun = ftrace_new_function (caller, mfun, fun);
+ bfun->up = caller;
+ bfun->level = caller->level + 1;
+ bfun->flags |= bfun_up_links_to_tailcall;
+
+ ftrace_debug (bfun, "new tail call");
+
+ return bfun;
+}
+
+/* Find the caller of BFUN.
+ This is the first function segment up the call stack from BFUN with
+ MFUN/FUN symbol information. */
+
+static struct btrace_function *
+ftrace_find_caller (struct btrace_function *bfun,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ for (; bfun != NULL; bfun = bfun->up)
+ {
+ /* Skip functions with incompatible symbol information. */
+ if (ftrace_function_switched (bfun, mfun, fun))
+ continue;
+
+ /* This is the function segment we're looking for. */
+ break;
+ }
+
+ return bfun;
+}
+
+/* Find the last actual call in the back trace of BFUN. */
+
+static struct btrace_function *
+ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
+{
+ if (!gdbarch_insn_call_p_p (gdbarch))
+ return NULL;
+
+ for (; bfun != NULL; bfun = bfun->up)
+ {
+ struct btrace_insn *last;
CORE_ADDR pc;
- pc = binst->pc;
+ if (VEC_empty (btrace_insn_s, bfun->insn))
+ continue;
+
+ last = VEC_last (btrace_insn_s, bfun->insn);
+ pc = last->pc;
+
+ if (gdbarch_insn_call_p (gdbarch, pc))
+ break;
+ }
+
+ return bfun;
+}
+
+/* Add a new function segment for a return. */
+
+static struct btrace_function *
+ftrace_new_return (struct gdbarch *gdbarch,
+ struct btrace_function *prev,
+ struct minimal_symbol *mfun,
+ struct symbol *fun)
+{
+ struct btrace_function *bfun, *caller;
+
+ bfun = ftrace_new_function (prev, mfun, fun);
+
+ /* It is important to start at PREV's caller. Otherwise, we might find
+ PREV itself, if PREV is a recursive function. */
+ caller = ftrace_find_caller (prev->up, mfun, fun);
+ if (caller != NULL)
+ {
+ /* The caller of PREV is the preceding btrace function segment in this
+ function instance. */
+ gdb_assert (caller->segment.next == NULL);
+
+ caller->segment.next = bfun;
+ bfun->segment.prev = caller;
+
+ /* Maintain the function level. */
+ bfun->level = caller->level;
- /* Try to determine the function we're in. We use both types of symbols
- to avoid surprises when we sometimes get a full symbol and sometimes
- only a minimal symbol. */
- fun = find_pc_function (pc);
- mfun = lookup_minimal_symbol_by_pc (pc);
+ /* Maintain the call stack. */
+ bfun->up = caller->up;
- if (fun == NULL && mfun.minsym == NULL)
+ ftrace_debug (bfun, "new return");
+ }
+ else
+ {
+ /* We did not find a caller. This could mean that something went
+ wrong or that the call is simply not included in the trace. */
+
+ /* Let's search for some actual call. */
+ caller = ftrace_find_call (gdbarch, prev->up);
+ if (caller == NULL)
{
- DEBUG_FTRACE ("no symbol at %u, pc=%s", idx,
- core_addr_to_string_nz (pc));
- continue;
- }
+ /* There is no call in PREV's back trace. We assume that the
+ branch trace did not include it. */
+
+ /* Let's find the topmost call function - this skips tail calls. */
+ while (prev->up != NULL)
+ prev = prev->up;
- /* If we're switching functions, we start over. */
- if (ftrace_function_switched (bfun, mfun.minsym, fun))
+ /* We maintain levels for a series of returns for which we have
+ not seen the calls, but we restart at level 0, otherwise. */
+ bfun->level = min (0, prev->level) - 1;
+
+ /* Fix up the call stack for PREV. */
+ ftrace_fixup_caller (prev, bfun);
+ prev->flags |= bfun_up_links_to_ret;
+
+ ftrace_debug (bfun, "new return - no caller");
+ }
+ else
{
- bfun = VEC_safe_push (btrace_func_s, ftrace, NULL);
+ /* There is a call in PREV's back trace to which we should have
+ returned. Let's remain at this level. */
+ bfun->level = prev->level;
- ftrace_init_func (bfun, mfun.minsym, fun, idx);
- ftrace_debug (bfun, "init");
+ ftrace_debug (bfun, "new return - unknown caller");
}
+ }
+
+ return bfun;
+}
+
+/* Add a new function segment for a function switch. */
+
+static struct btrace_function *
+ftrace_new_switch (struct btrace_function *prev,
+ struct minimal_symbol *mfun,
+ struct symbol *fun,
+ const struct btrace_insn *insn)
+{
+ struct btrace_function *bfun;
+
+ /* This is an unexplained function switch. The call stack will likely
+ be wrong at this point. */
+ bfun = ftrace_new_function (prev, mfun, fun);
+
+ /* We keep the function level. */
+ bfun->level = prev->level;
+
+ ftrace_debug (bfun, "new switch");
- /* Update the instruction range. */
- bfun->iend = idx;
- ftrace_debug (bfun, "update insns");
+ return bfun;
+}
+
+/* Update the branch trace function segment. Never returns NULL. */
+
+static struct btrace_function *
+ftrace_update_function (struct gdbarch *gdbarch,
+ struct btrace_function *bfun, CORE_ADDR pc)
+{
+ struct bound_minimal_symbol bmfun;
+ struct minimal_symbol *mfun;
+ struct symbol *fun;
+ struct btrace_insn *last;
+
+ /* Try to determine the function we're in. We use both types of symbols
+ to avoid surprises when we sometimes get a full symbol and sometimes
+ only a minimal symbol. */
+ fun = find_pc_function (pc);
+ bmfun = lookup_minimal_symbol_by_pc (pc);
+ mfun = bmfun.minsym;
+
+ if (fun == NULL && mfun == NULL)
+ DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
+
+ /* If we didn't have a function before, we create one. */
+ if (bfun == NULL)
+ return ftrace_new_function (bfun, mfun, fun);
+
+ /* Check the last instruction, if we have one.
+ We do this check first, since it allows us to fill in the call stack
+ links in addition to the normal flow links. */
+ last = NULL;
+ if (!VEC_empty (btrace_insn_s, bfun->insn))
+ last = VEC_last (btrace_insn_s, bfun->insn);
- /* Let's see if we have source correlation, as well. */
- sal = find_pc_line (pc, 0);
- if (sal.symtab == NULL || sal.line == 0)
+ if (last != NULL)
+ {
+ CORE_ADDR lpc;
+
+ lpc = last->pc;
+
+ /* Check for returns. */
+ if (gdbarch_insn_ret_p_p (gdbarch) && gdbarch_insn_ret_p (gdbarch, lpc))
+ return ftrace_new_return (gdbarch, bfun, mfun, fun);
+
+ /* Check for calls. */
+ if (gdbarch_insn_call_p_p (gdbarch) && gdbarch_insn_call_p (gdbarch, lpc))
{
- DEBUG_FTRACE ("no lines at %u, pc=%s", idx,
- core_addr_to_string_nz (pc));
- continue;
+ int size;
+
+ size = gdb_insn_length (gdbarch, lpc);
+
+ /* Ignore calls to the next instruction. They are used for PIC. */
+ if (lpc + size != pc)
+ return ftrace_new_call (bfun, mfun, fun);
}
+ }
+
+ /* Check if we're switching functions for some other reason. */
+ if (ftrace_function_switched (bfun, mfun, fun))
+ {
+ DEBUG_FTRACE ("switching from %s in %s at %s",
+ ftrace_print_insn_addr (last),
+ ftrace_print_function_name (bfun),
+ ftrace_print_filename (bfun));
- /* Check if we switched files. This could happen if, say, a macro that
- is defined in another file is expanded here. */
- filename = symtab_to_fullname (sal.symtab);
- if (ftrace_skip_file (bfun, filename))
+ if (last != NULL)
{
- DEBUG_FTRACE ("ignoring file at %u, pc=%s, file=%s", idx,
- core_addr_to_string_nz (pc), filename);
- continue;
+ CORE_ADDR start, lpc;
+
+ /* If we have symbol information for our current location, use
+ it to check that we jump to the start of a function. */
+ if (fun != NULL || mfun != NULL)
+ start = get_pc_function_start (pc);
+ else
+ start = pc;
+
+ lpc = last->pc;
+
+ /* Jumps indicate optimized tail calls. */
+ if (start == pc
+ && gdbarch_insn_jump_p_p (gdbarch)
+ && gdbarch_insn_jump_p (gdbarch, lpc))
+ return ftrace_new_tailcall (bfun, mfun, fun);
}
- /* Update the line range. */
- bfun->lbegin = min (bfun->lbegin, sal.line);
- bfun->lend = max (bfun->lend, sal.line);
- ftrace_debug (bfun, "update lines");
+ return ftrace_new_switch (bfun, mfun, fun, last);
+ }
+
+ return bfun;
+}
+
+/* Update the source correlation for a branch trace function segment. */
+
+static void
+ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
+{
+ struct symtab_and_line sal;
+ const char *filename;
+
+ sal = find_pc_line (pc, 0);
+ if (sal.symtab == NULL || sal.line == 0)
+ {
+ DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
+ return;
+ }
+
+ /* Check if we switched files. This could happen if, say, a macro that
+ is defined in another file is expanded here. */
+ filename = symtab_to_fullname (sal.symtab);
+ if (ftrace_skip_file (bfun, filename))
+ {
+ DEBUG_FTRACE ("ignoring file at %s, file=%s",
+ core_addr_to_string_nz (pc), filename);
+ return;
}
- return ftrace;
+ /* Update the line range. */
+ bfun->lbegin = min (bfun->lbegin, sal.line);
+ bfun->lend = max (bfun->lend, sal.line);
+
+ if (record_debug > 1)
+ ftrace_debug (bfun, "update lines");
+}
+
+/* Update the instructions for a branch trace function segment. */
+
+static void
+ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
+{
+ struct btrace_insn *insn;
+
+ insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
+ insn->pc = pc;
+
+ if (record_debug > 1)
+ ftrace_debug (bfun, "update insn");
+}
+
+/* Compute the function branch trace. */
+
+static void
+btrace_compute_ftrace (struct btrace_thread_info *btinfo,
+ VEC (btrace_block_s) *btrace)
+{
+ struct btrace_function *begin, *end;
+ struct gdbarch *gdbarch;
+ unsigned int blk;
+ int level;
+
+ DEBUG ("compute ftrace");
+
+ gdbarch = target_gdbarch ();
+ begin = NULL;
+ end = NULL;
+ level = INT_MAX;
+ blk = VEC_length (btrace_block_s, btrace);
+
+ while (blk != 0)
+ {
+ btrace_block_s *block;
+ CORE_ADDR pc;
+
+ blk -= 1;
+
+ block = VEC_index (btrace_block_s, btrace, blk);
+ pc = block->begin;
+
+ for (;;)
+ {
+ int size;
+
+ /* We should hit the end of the block. Warn if we went too far. */
+ if (block->end < pc)
+ {
+ warning (_("Recorded trace may be corrupted."));
+ break;
+ }
+
+ end = ftrace_update_function (gdbarch, end, pc);
+ if (begin == NULL)
+ begin = end;
+
+ /* Maintain the function level offset. */
+ level = min (level, end->level);
+
+ ftrace_update_insns (end, pc);
+ ftrace_update_lines (end, pc);
+
+ /* We're done once we pushed the instruction at the end. */
+ if (block->end == pc)
+ break;
+
+ size = gdb_insn_length (gdbarch, pc);
+
+ /* Make sure we terminate if we fail to compute the size. */
+ if (size <= 0)
+ {
+ warning (_("Recorded trace may be incomplete."));
+ break;
+ }
+
+ pc += size;
+ }
+ }
+
+ /* Add an empty dummy function to mark the end of the branch trace. */
+ end = ftrace_new_function (end, NULL, NULL);
+
+ btinfo->begin = begin;
+ btinfo->end = end;
+
+ /* LEVEL is the minimal function level of all btrace function segments.
+ Define the global level offset to -LEVEL so all function levels are
+ normalized to start at zero. */
+ btinfo->level = -level;
}
/* See btrace.h. */
@@ -394,6 +704,7 @@ btrace_fetch (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
VEC (btrace_block_s) *btrace;
+ struct cleanup *cleanup;
DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
@@ -402,18 +713,15 @@ btrace_fetch (struct thread_info *tp)
return;
btrace = target_read_btrace (btinfo->target, btrace_read_new);
- if (VEC_empty (btrace_block_s, btrace))
- return;
-
- btrace_clear (tp);
+ cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
- btinfo->btrace = btrace;
- btinfo->itrace = compute_itrace (btinfo->btrace);
- btinfo->ftrace = compute_ftrace (btinfo->itrace);
+ if (!VEC_empty (btrace_block_s, btrace))
+ {
+ btrace_clear (tp);
+ btrace_compute_ftrace (btinfo, btrace);
+ }
- /* Initialize branch trace iterators. */
- btrace_init_insn_iterator (btinfo);
- btrace_init_func_iterator (btinfo);
+ do_cleanups (cleanup);
}
/* See btrace.h. */
@@ -422,18 +730,29 @@ void
btrace_clear (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
+ struct btrace_function *it, *trash;
DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
btinfo = &tp->btrace;
- VEC_free (btrace_block_s, btinfo->btrace);
- VEC_free (btrace_inst_s, btinfo->itrace);
- VEC_free (btrace_func_s, btinfo->ftrace);
+ it = btinfo->begin;
+ while (it != NULL)
+ {
+ trash = it;
+ it = it->flow.next;
+
+ xfree (trash);
+ }
+
+ btinfo->begin = NULL;
+ btinfo->end = NULL;
- btinfo->btrace = NULL;
- btinfo->itrace = NULL;
- btinfo->ftrace = NULL;
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
}
/* See btrace.h. */
@@ -541,3 +860,301 @@ parse_xml_btrace (const char *buffer)
return btrace;
}
+
+/* See btrace.h. */
+
+const struct btrace_insn *
+btrace_insn_get (const struct btrace_insn_iterator *it)
+{
+ struct btrace_function *function;
+ unsigned int index, end;
+
+ if (it == NULL)
+ return NULL;
+
+ index = it->index;
+ function = it->function;
+ if (function == NULL)
+ return NULL;
+
+ end = VEC_length (btrace_insn_s, function->insn);
+ if (end == 0)
+ return NULL;
+
+ gdb_assert (index < end);
+
+ return VEC_index (btrace_insn_s, function->insn, index);
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_number (const struct btrace_insn_iterator *it)
+{
+ struct btrace_function *function;
+
+ if (it == NULL)
+ return 0;
+
+ function = it->function;
+ if (function == NULL)
+ return 0;
+
+ return function->insn_offset + it->index;
+}
+
+/* See btrace.h. */
+
+void
+btrace_insn_begin (struct btrace_insn_iterator *it,
+ struct btrace_thread_info *btinfo)
+{
+ struct btrace_function *begin;
+
+ begin = btinfo->begin;
+ if (begin == NULL)
+ error (_("No trace."));
+
+ it->function = begin;
+ it->index = 0;
+}
+
+/* See btrace.h. */
+
+void
+btrace_insn_end (struct btrace_insn_iterator *it,
+ struct btrace_thread_info *btinfo)
+{
+ struct btrace_function *end;
+
+ end = btinfo->end;
+ if (end == NULL)
+ error (_("No trace."));
+
+ /* The last function is an empty dummy. */
+ it->function = end;
+ it->index = 0;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_next (struct btrace_insn_iterator * it, unsigned int stride)
+{
+ struct btrace_function *function;
+ unsigned int index, end, space, adv, steps;
+
+ if (it == NULL)
+ return 0;
+
+ function = it->function;
+ if (function == NULL)
+ return 0;
+
+ steps = 0;
+ index = it->index;
+
+ while (stride != 0)
+ {
+ end = VEC_length (btrace_insn_s, function->insn);
+
+ /* Compute the number of instructions remaining in this segment. */
+ gdb_assert ((end == 0 && index == 0) || index < end);
+ space = end - index;
+
+ /* Advance the iterator as far as possible within this segment. */
+ adv = min (space, stride);
+ stride -= adv;
+ index += adv;
+ steps += adv;
+
+ /* Move to the next function if we're at the end of this one. */
+ if (index == end)
+ {
+ struct btrace_function *next;
+
+ next = function->flow.next;
+ if (next == NULL)
+ {
+ /* We stepped past the last function - an empty dummy. */
+ gdb_assert (adv == 0);
+ break;
+ }
+
+ /* We now point to the first instruction in the new function. */
+ function = next;
+ index = 0;
+ }
+
+ /* We did make progress. */
+ gdb_assert (adv > 0);
+ }
+
+ /* Update the iterator. */
+ it->function = function;
+ it->index = index;
+
+ return steps;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_prev (struct btrace_insn_iterator * it, unsigned int stride)
+{
+ struct btrace_function *function;
+ unsigned int index, adv, steps;
+
+ if (it == NULL)
+ return 0;
+
+ function = it->function;
+ if (function == NULL)
+ return 0;
+
+ steps = 0;
+ index = it->index;
+
+ while (stride != 0)
+ {
+ /* Move to the previous function if we're at the start of this one. */
+ if (index == 0)
+ {
+ struct btrace_function *prev;
+
+ prev = function->flow.prev;
+ if (prev == NULL)
+ break;
+
+ /* We point to one after the last instruction in the new function. */
+ function = prev;
+ index = VEC_length (btrace_insn_s, function->insn);
+
+ /* There is at least one instruction in this function segment. */
+ gdb_assert (index > 0);
+ }
+
+ /* Advance the iterator as far as possible within this segment. */
+ adv = min (index, stride);
+ stride -= adv;
+ index -= adv;
+ steps += adv;
+
+ /* We did make progress. */
+ gdb_assert (adv > 0);
+ }
+
+ /* Update the iterator. */
+ it->function = function;
+ it->index = index;
+
+ return steps;
+}
+
+/* See btrace.h. */
+
+int
+btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
+ const struct btrace_insn_iterator *rhs)
+{
+ unsigned int lnum, rnum;
+
+ lnum = btrace_insn_number (lhs);
+ rnum = btrace_insn_number (rhs);
+
+ return (int) (lnum - rnum);
+}
+
+/* See btrace.h. */
+
+int
+btrace_find_insn_by_number (struct btrace_insn_iterator *it,
+ const struct btrace_thread_info *btinfo,
+ unsigned int number)
+{
+ struct btrace_function *bfun;
+ unsigned int last;
+
+ for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
+ if (bfun->insn_offset <= number)
+ break;
+
+ if (bfun == NULL)
+ return 0;
+
+ last = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
+ if (last <= number)
+ return 0;
+
+ it->function = bfun;
+ it->index = number - bfun->insn_offset;
+
+ return 1;
+}
+
+/* See btrace.h. */
+
+struct btrace_function *
+btrace_find_function_by_number (const struct btrace_thread_info *btinfo,
+ unsigned int number)
+{
+ struct btrace_function *bfun;
+
+ if (btinfo == NULL)
+ return NULL;
+
+ for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
+ {
+ unsigned int bnum;
+
+ bnum = bfun->number;
+ if (number == bnum)
+ return bfun;
+
+ /* Functions are ordered and numbered consecutively. We could bail out
+ earlier. On the other hand, it is very unlikely that we search for
+ a nonexistent function. */
+ }
+
+ return NULL;
+}
+
+/* See btrace.h. */
+
+void
+btrace_set_insn_history (struct btrace_thread_info *btinfo,
+ struct btrace_insn_iterator *begin,
+ struct btrace_insn_iterator *end)
+{
+ struct btrace_insn_history *history;
+
+ history = btinfo->insn_history;
+ if (history == NULL)
+ {
+ history = xzalloc (sizeof (*history));
+ btinfo->insn_history = history;
+ }
+
+ history->begin = *begin;
+ history->end = *end;
+}
+
+/* See btrace.h. */
+
+void
+btrace_set_call_history (struct btrace_thread_info *btinfo,
+ struct btrace_function *begin,
+ struct btrace_function *end)
+{
+ struct btrace_call_history *history;
+
+ history = btinfo->call_history;
+ if (history == NULL)
+ {
+ history = xzalloc (sizeof (*history));
+ btinfo->call_history = history;
+ }
+
+ history->begin = begin;
+ history->end = end;
+}
diff --git a/gdb/btrace.h b/gdb/btrace.h
index bd8425d..ac7acdb 100644
--- a/gdb/btrace.h
+++ b/gdb/btrace.h
@@ -29,63 +29,106 @@
#include "btrace-common.h"
struct thread_info;
+struct btrace_function;
/* A branch trace instruction.
This represents a single instruction in a branch trace. */
-struct btrace_inst
+struct btrace_insn
{
/* The address of this instruction. */
CORE_ADDR pc;
};
-/* A branch trace function.
+/* A vector of branch trace instructions. */
+typedef struct btrace_insn btrace_insn_s;
+DEF_VEC_O (btrace_insn_s);
+
+/* A doubly-linked list of branch trace function segments. */
+struct btrace_func_link
+{
+ struct btrace_function *prev;
+ struct btrace_function *next;
+};
+
+/* Flags for btrace function segments. */
+enum btrace_function_flag
+{
+ /* The 'up' link interpretation.
+ If set, it points to the function segment we returned to.
+ If clear, it points to the function segment we called from. */
+ bfun_up_links_to_ret = (1 << 0),
+
+ /* The 'up' link points to a tail call. This obviously only makes sense
+ if bfun_up_links_to_ret is clear. */
+ bfun_up_links_to_tailcall = (1 << 1)
+};
+
+/* A branch trace function segment.
This represents a function segment in a branch trace, i.e. a consecutive
number of instructions belonging to the same function. */
-struct btrace_func
+struct btrace_function
{
/* The full and minimal symbol for the function. One of them may be NULL. */
struct minimal_symbol *msym;
struct symbol *sym;
+ /* The previous and next segment belonging to the same function. */
+ struct btrace_func_link segment;
+
+ /* The previous and next function in control flow order. */
+ struct btrace_func_link flow;
+
+ /* The directly preceding function segment in a (fake) call stack. */
+ struct btrace_function *up;
+
+ /* The instructions in this function segment. */
+ VEC (btrace_insn_s) *insn;
+
+ /* The instruction number offset for the first instruction in this
+ function segment. */
+ unsigned int insn_offset;
+
+ /* The function number. */
+ unsigned int number;
+
+ /* The function level. */
+ int level;
+
/* The source line range of this function segment (both inclusive). */
int lbegin, lend;
- /* The instruction number range in the instruction trace corresponding
- to this function segment (both inclusive). */
- unsigned int ibegin, iend;
+ /* A bit-vector of btrace_function_flag. */
+ unsigned int flags;
};
-/* Branch trace may also be represented as a vector of:
-
- - branch trace instructions starting with the oldest instruction.
- - branch trace functions starting with the oldest function. */
-typedef struct btrace_inst btrace_inst_s;
-typedef struct btrace_func btrace_func_s;
+/* A branch trace instruction iterator. */
+struct btrace_insn_iterator
+{
+ /* The branch trace function segment containing the instruction. */
+ struct btrace_function *function;
-/* Define functions operating on branch trace vectors. */
-DEF_VEC_O (btrace_inst_s);
-DEF_VEC_O (btrace_func_s);
+ /* The index into the function segment's instruction vector. */
+ unsigned int index;
+};
/* Branch trace iteration state for "record instruction-history". */
-struct btrace_insn_iterator
+struct btrace_insn_history
{
- /* The instruction index range from begin (inclusive) to end (exclusive)
- that has been covered last time.
- If end < begin, the branch trace has just been updated. */
- unsigned int begin;
- unsigned int end;
+ /* The branch trace instruction range from begin (inclusive) to
+ end (exclusive) that has been covered last time. */
+ struct btrace_insn_iterator begin;
+ struct btrace_insn_iterator end;
};
/* Branch trace iteration state for "record function-call-history". */
-struct btrace_func_iterator
+struct btrace_call_history
{
- /* The function index range from begin (inclusive) to end (exclusive)
- that has been covered last time.
- If end < begin, the branch trace has just been updated. */
- unsigned int begin;
- unsigned int end;
+ /* The branch trace function range from begin (inclusive) to end (exclusive)
+ that has been covered last time. */
+ struct btrace_function *begin;
+ struct btrace_function *end;
};
/* Branch trace information per thread.
@@ -104,15 +147,19 @@ struct btrace_thread_info
struct btrace_target_info *target;
/* The current branch trace for this thread. */
- VEC (btrace_block_s) *btrace;
- VEC (btrace_inst_s) *itrace;
- VEC (btrace_func_s) *ftrace;
+ struct btrace_function *begin;
+ struct btrace_function *end;
+
+ /* The function level offset. When added to each function's level,
+ this normalizes the function levels such that the smallest level
+ becomes zero. */
+ int level;
/* The instruction history iterator. */
- struct btrace_insn_iterator insn_iterator;
+ struct btrace_insn_history *insn_history;
/* The function call history iterator. */
- struct btrace_func_iterator func_iterator;
+ struct btrace_call_history *call_history;
};
/* Enable branch tracing for a thread. */
@@ -139,4 +186,60 @@ extern void btrace_free_objfile (struct objfile *);
/* Parse a branch trace xml document into a block vector. */
extern VEC (btrace_block_s) *parse_xml_btrace (const char*);
+/* Dereference a branch trace instruction iterator.  Return a pointer to the
+   instruction the iterator points to or NULL if the iterator does not point
+   to a valid instruction.  */
+extern const struct btrace_insn *
+btrace_insn_get (const struct btrace_insn_iterator *);
+
+/* Return the instruction number for a branch trace iterator. Returns zero
+ if the iterator does not point to a valid instruction. */
+extern unsigned int btrace_insn_number (const struct btrace_insn_iterator *);
+
+/* Initialize a branch trace instruction iterator to point to the begin/end of
+ the branch trace. Throws an error if there is no branch trace. */
+extern void btrace_insn_begin (struct btrace_insn_iterator *,
+ struct btrace_thread_info *);
+extern void btrace_insn_end (struct btrace_insn_iterator *,
+ struct btrace_thread_info *);
+
+/* Increment/decrement a branch trace instruction iterator. Return the number
+ of instructions by which the instruction iterator has been advanced.
+ Returns zero, if the operation failed. */
+extern unsigned int btrace_insn_next (struct btrace_insn_iterator *,
+ unsigned int stride);
+extern unsigned int btrace_insn_prev (struct btrace_insn_iterator *,
+ unsigned int stride);
+
+/* Compare two branch trace instruction iterators.
+ Return a negative number if LHS < RHS.
+ Return zero if LHS == RHS.
+ Return a positive number if LHS > RHS. */
+extern int btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
+ const struct btrace_insn_iterator *rhs);
+
+/* Find an instruction in the function branch trace by its number.
+ If the instruction is found, initialize the branch trace instruction
+ iterator to point to this instruction and return 1.
+ Return 0, otherwise. */
+extern int btrace_find_insn_by_number (struct btrace_insn_iterator *,
+ const struct btrace_thread_info *,
+ unsigned int number);
+
+/* Find a function in the function branch trace by its number.
+ Return a pointer to that function or NULL if no such function is found. */
+extern struct btrace_function *
+btrace_find_function_by_number (const struct btrace_thread_info *,
+ unsigned int number);
+
+/* Set the branch trace instruction history to [BEGIN; END). */
+extern void btrace_set_insn_history (struct btrace_thread_info *,
+ struct btrace_insn_iterator *begin,
+ struct btrace_insn_iterator *end);
+
+/* Set the branch trace function call history to [BEGIN; END). */
+extern void btrace_set_call_history (struct btrace_thread_info *,
+ struct btrace_function *begin,
+ struct btrace_function *end);
+
#endif /* BTRACE_H */
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 8fb413e..e2506a8 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -74,7 +74,7 @@ require_btrace (void)
btinfo = &tp->btrace;
- if (VEC_empty (btrace_inst_s, btinfo->itrace))
+ if (btinfo->begin == NULL)
error (_("No trace."));
return btinfo;
@@ -205,6 +205,7 @@ static void
record_btrace_info (void)
{
struct btrace_thread_info *btinfo;
+ struct btrace_function *bfun;
struct thread_info *tp;
unsigned int insts, funcs;
@@ -217,8 +218,15 @@ record_btrace_info (void)
btrace_fetch (tp);
btinfo = &tp->btrace;
- insts = VEC_length (btrace_inst_s, btinfo->itrace);
- funcs = VEC_length (btrace_func_s, btinfo->ftrace);
+ bfun = btinfo->end;
+ insts = 0;
+ funcs = 0;
+
+ if (bfun != NULL)
+ {
+ funcs = bfun->number;
+ insts = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
+ }
printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
"%d (%s).\n"), insts, funcs, tp->num,
@@ -236,27 +244,32 @@ ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
/* Disassemble a section of the recorded instruction trace. */
static void
-btrace_insn_history (struct btrace_thread_info *btinfo, struct ui_out *uiout,
- unsigned int begin, unsigned int end, int flags)
+btrace_insn_history (struct ui_out *uiout,
+ const struct btrace_insn_iterator *begin,
+ const struct btrace_insn_iterator *end, int flags)
{
struct gdbarch *gdbarch;
- struct btrace_inst *inst;
- unsigned int idx;
+ struct btrace_insn *inst;
+ struct btrace_insn_iterator it;
- DEBUG ("itrace (0x%x): [%u; %u[", flags, begin, end);
+ DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
+ btrace_insn_number (end));
gdbarch = target_gdbarch ();
- for (idx = begin; VEC_iterate (btrace_inst_s, btinfo->itrace, idx, inst)
- && idx < end; ++idx)
+ for (it = *begin; btrace_insn_cmp (&it, end) < 0; btrace_insn_next (&it, 1))
{
+ const struct btrace_insn *insn;
+
+ insn = btrace_insn_get (&it);
+
/* Print the instruction index. */
- ui_out_field_uint (uiout, "index", idx);
+ ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
ui_out_text (uiout, "\t");
/* Disassembly with '/m' flag may not produce the expected result.
See PR gdb/11833. */
- gdb_disassembly (gdbarch, uiout, NULL, flags, 1, inst->pc, inst->pc + 1);
+ gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
}
}
@@ -266,72 +279,60 @@ static void
record_btrace_insn_history (int size, int flags)
{
struct btrace_thread_info *btinfo;
+ struct btrace_insn_history *history;
+ struct btrace_insn_iterator begin, end;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
- unsigned int context, last, begin, end;
+ unsigned int context, covered;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
"insn history");
btinfo = require_btrace ();
- last = VEC_length (btrace_inst_s, btinfo->itrace);
-
context = abs (size);
- begin = btinfo->insn_iterator.begin;
- end = btinfo->insn_iterator.end;
-
- DEBUG ("insn-history (0x%x): %d, prev: [%u; %u[", flags, size, begin, end);
-
if (context == 0)
error (_("Bad record instruction-history-size."));
- /* We start at the end. */
- if (end < begin)
+ history = btinfo->insn_history;
+ if (history == NULL)
{
- /* Truncate the context, if necessary. */
- context = min (context, last);
+ /* No matter the direction, we start with the tail of the trace. */
+ btrace_insn_end (&begin, btinfo);
+ end = begin;
- end = last;
- begin = end - context;
+ covered = btrace_insn_prev (&begin, context);
}
- else if (size < 0)
+ else
{
- if (begin == 0)
- {
- printf_unfiltered (_("At the start of the branch trace record.\n"));
+ begin = history->begin;
+ end = history->end;
- btinfo->insn_iterator.end = 0;
- return;
- }
-
- /* Truncate the context, if necessary. */
- context = min (context, begin);
+ DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
+ btrace_insn_number (&begin), btrace_insn_number (&end));
- end = begin;
- begin -= context;
- }
- else
- {
- if (end == last)
+ if (size < 0)
{
- printf_unfiltered (_("At the end of the branch trace record.\n"));
-
- btinfo->insn_iterator.begin = last;
- return;
+ end = begin;
+ covered = btrace_insn_prev (&begin, context);
+ }
+ else
+ {
+ begin = end;
+ covered = btrace_insn_next (&end, context);
}
-
- /* Truncate the context, if necessary. */
- context = min (context, last - end);
-
- begin = end;
- end += context;
}
- btrace_insn_history (btinfo, uiout, begin, end, flags);
-
- btinfo->insn_iterator.begin = begin;
- btinfo->insn_iterator.end = end;
+ if (covered > 0)
+ btrace_insn_history (uiout, &begin, &end, flags);
+ else
+ {
+ if (size < 0)
+ printf_unfiltered (_("At the start of the branch trace record.\n"));
+ else
+ printf_unfiltered (_("At the end of the branch trace record.\n"));
+ }
+ btrace_set_insn_history (btinfo, &begin, &end);
do_cleanups (uiout_cleanup);
}
@@ -341,39 +342,41 @@ static void
record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
{
struct btrace_thread_info *btinfo;
+ struct btrace_insn_history *history;
+ struct btrace_insn_iterator begin, end;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
- unsigned int last, begin, end;
+ unsigned int low, high;
+ int found;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
"insn history");
- btinfo = require_btrace ();
- last = VEC_length (btrace_inst_s, btinfo->itrace);
-
- begin = (unsigned int) from;
- end = (unsigned int) to;
+ low = (unsigned int) from;
+ high = (unsigned int) to;
- DEBUG ("insn-history (0x%x): [%u; %u[", flags, begin, end);
+ DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
/* Check for wrap-arounds. */
- if (begin != from || end != to)
+ if (low != from || high != to)
error (_("Bad range."));
- if (end <= begin)
+ if (high <= low)
error (_("Bad range."));
- if (last <= begin)
- error (_("Range out of bounds."));
+ btinfo = require_btrace ();
- /* Truncate the range, if necessary. */
- if (last < end)
- end = last;
+ found = btrace_find_insn_by_number (&begin, btinfo, low);
+ if (found == 0)
+ error (_("Range out of bounds."));
- btrace_insn_history (btinfo, uiout, begin, end, flags);
+ /* Silently truncate the range, if necessary. */
+ found = btrace_find_insn_by_number (&end, btinfo, high);
+ if (found == 0)
+ btrace_insn_end (&end, btinfo);
- btinfo->insn_iterator.begin = begin;
- btinfo->insn_iterator.end = end;
+ btrace_insn_history (uiout, &begin, &end, flags);
+ btrace_set_insn_history (btinfo, &begin, &end);
do_cleanups (uiout_cleanup);
}
@@ -412,23 +415,27 @@ record_btrace_insn_history_from (ULONGEST from, int size, int flags)
/* Print the instruction number range for a function call history line. */
static void
-btrace_func_history_insn_range (struct ui_out *uiout, struct btrace_func *bfun)
+btrace_call_history_insn_range (struct ui_out *uiout,
+ const struct btrace_function *bfun)
{
- ui_out_field_uint (uiout, "insn begin", bfun->ibegin);
+ unsigned int begin, end;
- if (bfun->ibegin == bfun->iend)
- return;
+ begin = bfun->insn_offset;
+ end = begin + VEC_length (btrace_insn_s, bfun->insn);
+ ui_out_field_uint (uiout, "insn begin", begin);
ui_out_text (uiout, "-");
- ui_out_field_uint (uiout, "insn end", bfun->iend);
+ ui_out_field_uint (uiout, "insn end", end);
}
/* Print the source line information for a function call history line. */
static void
-btrace_func_history_src_line (struct ui_out *uiout, struct btrace_func *bfun)
+btrace_call_history_src_line (struct ui_out *uiout,
+ const struct btrace_function *bfun)
{
struct symbol *sym;
+ int begin, end;
sym = bfun->sym;
if (sym == NULL)
@@ -437,130 +444,183 @@ btrace_func_history_src_line (struct ui_out *uiout, struct btrace_func *bfun)
ui_out_field_string (uiout, "file",
symtab_to_filename_for_display (sym->symtab));
- if (bfun->lend == 0)
+ begin = bfun->lbegin;
+ end = bfun->lend;
+
+ if (end == 0)
return;
ui_out_text (uiout, ":");
- ui_out_field_int (uiout, "min line", bfun->lbegin);
+ ui_out_field_int (uiout, "min line", begin);
- if (bfun->lend == bfun->lbegin)
+ if (end == begin)
return;
ui_out_text (uiout, "-");
- ui_out_field_int (uiout, "max line", bfun->lend);
+ ui_out_field_int (uiout, "max line", end);
}
/* Disassemble a section of the recorded function trace. */
static void
-btrace_func_history (struct btrace_thread_info *btinfo, struct ui_out *uiout,
- unsigned int begin, unsigned int end,
+btrace_call_history (struct ui_out *uiout,
+ const struct btrace_function *begin,
+ const struct btrace_function *end,
enum record_print_flag flags)
{
- struct btrace_func *bfun;
- unsigned int idx;
+ const struct btrace_function *bfun;
- DEBUG ("ftrace (0x%x): [%u; %u[", flags, begin, end);
+ DEBUG ("ftrace (0x%x): [%u; %u)", flags, begin->number, end->number);
- for (idx = begin; VEC_iterate (btrace_func_s, btinfo->ftrace, idx, bfun)
- && idx < end; ++idx)
+ for (bfun = begin; bfun != end; bfun = bfun->flow.next)
{
+ struct minimal_symbol *msym;
+ struct symbol *sym;
+
+ msym = bfun->msym;
+ sym = bfun->sym;
+
/* Print the function index. */
- ui_out_field_uint (uiout, "index", idx);
+ ui_out_field_uint (uiout, "index", bfun->number);
ui_out_text (uiout, "\t");
if ((flags & record_print_insn_range) != 0)
{
- btrace_func_history_insn_range (uiout, bfun);
+ btrace_call_history_insn_range (uiout, bfun);
ui_out_text (uiout, "\t");
}
if ((flags & record_print_src_line) != 0)
{
- btrace_func_history_src_line (uiout, bfun);
+ btrace_call_history_src_line (uiout, bfun);
ui_out_text (uiout, "\t");
}
- if (bfun->sym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (bfun->sym));
- else if (bfun->msym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (bfun->msym));
+ if (sym != NULL)
+ ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
+ else if (msym != NULL)
+ ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
+
ui_out_text (uiout, "\n");
}
}
+/* Decrement a btrace function iterator. Return the number of functions
+ by which the iterator has been decremented.
+ Returns zero, if the operation failed. */
+
+static unsigned int
+btrace_func_prev (struct btrace_function **it, unsigned int stride)
+{
+ struct btrace_function *bfun;
+ unsigned int covered;
+
+ bfun = *it;
+ covered = 0;
+ while (covered < stride)
+ {
+ struct btrace_function *prev;
+
+ prev = bfun->flow.prev;
+ if (prev == NULL)
+ break;
+
+ bfun = prev;
+ covered += 1;
+ }
+
+ *it = bfun;
+ return covered;
+}
+
+/* Increment a btrace function iterator. Return the number of functions
+ by which the iterator has been incremented.
+ Returns zero, if the operation failed. */
+
+static unsigned int
+btrace_func_next (struct btrace_function **it, unsigned int stride)
+{
+ struct btrace_function *bfun;
+ unsigned int covered;
+
+ bfun = *it;
+ covered = 0;
+ while (covered < stride)
+ {
+ struct btrace_function *next;
+
+ next = bfun->flow.next;
+ if (next == NULL)
+ break;
+
+ bfun = next;
+ covered += 1;
+ }
+
+ *it = bfun;
+ return covered;
+}
+
/* The to_call_history method of target record-btrace. */
static void
record_btrace_call_history (int size, int flags)
{
struct btrace_thread_info *btinfo;
+ struct btrace_call_history *history;
+ struct btrace_function *begin, *end;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
- unsigned int context, last, begin, end;
+ unsigned int context, covered;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
"insn history");
- btinfo = require_btrace ();
- last = VEC_length (btrace_func_s, btinfo->ftrace);
-
context = abs (size);
- begin = btinfo->func_iterator.begin;
- end = btinfo->func_iterator.end;
-
- DEBUG ("func-history (0x%x): %d, prev: [%u; %u[", flags, size, begin, end);
-
if (context == 0)
error (_("Bad record function-call-history-size."));
- /* We start at the end. */
- if (end < begin)
+ btinfo = require_btrace ();
+ history = btinfo->call_history;
+ if (history == NULL)
{
- /* Truncate the context, if necessary. */
- context = min (context, last);
+ /* No matter the direction, we start with the tail of the trace. */
+ begin = btinfo->end;
+ end = begin;
- end = last;
- begin = end - context;
+ covered = btrace_func_prev (&begin, context);
}
- else if (size < 0)
+ else
{
- if (begin == 0)
- {
- printf_unfiltered (_("At the start of the branch trace record.\n"));
+ begin = history->begin;
+ end = history->end;
- btinfo->func_iterator.end = 0;
- return;
- }
-
- /* Truncate the context, if necessary. */
- context = min (context, begin);
+ DEBUG ("call-history (0x%x): %d, prev: [%u; %u[", flags, size,
+ begin->number, end->number);
- end = begin;
- begin -= context;
- }
- else
- {
- if (end == last)
+ if (size < 0)
{
- printf_unfiltered (_("At the end of the branch trace record.\n"));
-
- btinfo->func_iterator.begin = last;
- return;
+ end = begin;
+ covered = btrace_func_prev (&begin, context);
+ }
+ else
+ {
+ begin = end;
+ covered = btrace_func_next (&end, context);
}
-
- /* Truncate the context, if necessary. */
- context = min (context, last - end);
-
- begin = end;
- end += context;
}
- btrace_func_history (btinfo, uiout, begin, end, flags);
-
- btinfo->func_iterator.begin = begin;
- btinfo->func_iterator.end = end;
+ if (covered > 0)
+ btrace_call_history (uiout, begin, end, flags);
+ else
+ {
+ if (size < 0)
+ printf_unfiltered (_("At the start of the branch trace record.\n"));
+ else
+ printf_unfiltered (_("At the end of the branch trace record.\n"));
+ }
+ btrace_set_call_history (btinfo, begin, end);
do_cleanups (uiout_cleanup);
}
@@ -570,39 +630,40 @@ static void
record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
{
struct btrace_thread_info *btinfo;
+ struct btrace_call_history *history;
+ struct btrace_function *begin, *end;
struct cleanup *uiout_cleanup;
struct ui_out *uiout;
- unsigned int last, begin, end;
+ unsigned int low, high;
uiout = current_uiout;
uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
"func history");
- btinfo = require_btrace ();
- last = VEC_length (btrace_func_s, btinfo->ftrace);
-
- begin = (unsigned int) from;
- end = (unsigned int) to;
+ low = (unsigned int) from;
+ high = (unsigned int) to;
- DEBUG ("func-history (0x%x): [%u; %u[", flags, begin, end);
+ DEBUG ("call-history (0x%x): [%u; %u[", flags, low, high);
/* Check for wrap-arounds. */
- if (begin != from || end != to)
+ if (low != from || high != to)
error (_("Bad range."));
- if (end <= begin)
+ if (high <= low)
error (_("Bad range."));
- if (last <= begin)
- error (_("Range out of bounds."));
+ btinfo = require_btrace ();
- /* Truncate the range, if necessary. */
- if (last < end)
- end = last;
+ begin = btrace_find_function_by_number (btinfo, low);
+ if (begin == NULL)
+ error (_("Range out of bounds."));
- btrace_func_history (btinfo, uiout, begin, end, flags);
+ /* Silently truncate the range, if necessary. */
+ end = btrace_find_function_by_number (btinfo, high);
+ if (end == NULL)
+ end = btinfo->end;
- btinfo->func_iterator.begin = begin;
- btinfo->func_iterator.end = end;
+ btrace_call_history (uiout, begin, end, flags);
+ btrace_set_call_history (btinfo, begin, end);
do_cleanups (uiout_cleanup);
}
diff --git a/gdb/testsuite/gdb.btrace/function_call_history.exp b/gdb/testsuite/gdb.btrace/function_call_history.exp
index 97447e1..7658637 100644
--- a/gdb/testsuite/gdb.btrace/function_call_history.exp
+++ b/gdb/testsuite/gdb.btrace/function_call_history.exp
@@ -204,16 +204,18 @@ set bp_location [gdb_get_line_number "bp.2" $testfile.c]
gdb_breakpoint $bp_location
gdb_continue_to_breakpoint "cont to $bp_location" ".*$testfile.c:$bp_location.*"
-# at this point we expect to have main, fib, ..., fib, main, where fib occurs 8 times,
-# so we limit the output to only show the latest 10 function calls
-gdb_test_no_output "set record function-call-history-size 10"
-set message "show recursive function call history"
-gdb_test_multiple "record function-call-history" $message {
- -re "13\tmain\r\n14\tfib\r\n15\tfib\r\n16\tfib\r\n17\tfib\r\n18\tfib\r\n19\tfib\r\n20\tfib\r\n21\tfib\r\n22 main\r\n$gdb_prompt $" {
- pass $message
- }
- -re "13\tinc\r\n14\tmain\r\n15\tinc\r\n16\tmain\r\n17\tinc\r\n18\tmain\r\n19\tinc\r\n20\tmain\r\n21\tfib\r\n22\tmain\r\n$gdb_prompt $" {
- # recursive function calls appear only as 1 call
- kfail "gdb/15240" $message
- }
-}
+# at this point we expect to have main, fib, ..., fib, main, where fib occurs 9 times,
+# so we limit the output to only show the latest 11 function calls
+gdb_test_no_output "set record function-call-history-size 11"
+gdb_test "record function-call-history" "
+20\tmain\r
+21\tfib\r
+22\tfib\r
+23\tfib\r
+24\tfib\r
+25\tfib\r
+26\tfib\r
+27\tfib\r
+28\tfib\r
+29\tfib\r
+30\tmain" "show recursive function call history"
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* Re: [PATCH 02/15] btrace: change branch trace data structure
2013-05-02 12:04 ` [PATCH 02/15] btrace: change branch trace data structure Markus Metzger
@ 2013-05-13 15:25 ` Jan Kratochvil
2013-05-14 15:27 ` Metzger, Markus T
0 siblings, 1 reply; 24+ messages in thread
From: Jan Kratochvil @ 2013-05-13 15:25 UTC (permalink / raw)
To: Markus Metzger; +Cc: gdb-patches, Christian Himpel
Hi Markus,
as mailed off-list you have an updated version so sending only a partial
review as I have done so far.
Thanks,
Jan
On Thu, 02 May 2013 14:03:23 +0200, Markus Metzger wrote:
> --- a/gdb/btrace.c
> +++ b/gdb/btrace.c
[...]
> -/* Initialize a recorded function segment. */
> +/* Print an ftrace debug status message. */
>
> static void
> -ftrace_init_func (struct btrace_func *bfun, struct minimal_symbol *mfun,
> - struct symbol *fun, unsigned int idx)
> +ftrace_debug (const struct btrace_function *bfun, const char *prefix)
> {
> - bfun->msym = mfun;
> - bfun->sym = fun;
> - bfun->lbegin = INT_MAX;
> - bfun->lend = 0;
> - bfun->ibegin = idx;
> - bfun->iend = idx;
> + const char *fun, *file;
> + unsigned int ibegin, iend;
> + int lbegin, lend, level;
> +
> + fun = ftrace_print_function_name (bfun);
> + file = ftrace_print_filename (bfun);
> + level = bfun->level;
> +
> + lbegin = bfun->lbegin;
> + lend = bfun->lend;
> +
> + ibegin = bfun->insn_offset;
> + iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
> +
> + DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
> + "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
> + ibegin, iend);
> }
>
> /* Check whether the function has changed. */
+ /* Return 1 if BFUN does not match MFUN and FUN, return 0 if they match. */
>
> static int
> -ftrace_function_switched (struct btrace_func *bfun,
> - struct minimal_symbol *mfun, struct symbol *fun)
> +ftrace_function_switched (const struct btrace_function *bfun,
> + const struct minimal_symbol *mfun,
> + const struct symbol *fun)
> {
> struct minimal_symbol *msym;
> struct symbol *sym;
>
> - /* The function changed if we did not have one before. */
> - if (bfun == NULL)
> - return 1;
> -
> msym = bfun->msym;
> sym = bfun->sym;
>
> @@ -228,6 +155,14 @@ ftrace_function_switched (struct btrace_func *bfun,
> return 1;
> }
>
> + /* If we lost symbol information, we switched functions. */
> + if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
> + return 1;
> +
> + /* If we gained symbol information, we switched functions. */
> + if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
> + return 1;
> +
> return 0;
> }
>
> @@ -236,7 +171,7 @@ ftrace_function_switched (struct btrace_func *bfun,
> in another file is expanded in this function. */
>
> static int
> -ftrace_skip_file (struct btrace_func *bfun, const char *filename)
> +ftrace_skip_file (const struct btrace_function *bfun, const char *filename)
It was already that way before, but please rename the parameter to "fullname";
it should be used together with symtab_to_fullname and not with
symtab_to_filename_for_display.
> {
> struct symbol *sym;
> const char *bfile;
> @@ -254,83 +189,458 @@ ftrace_skip_file (struct btrace_func *bfun, const char *filename)
> return (filename_cmp (bfile, filename) != 0);
> }
>
> -/* Compute the function trace from the instruction trace. */
> +/* Allocate and initialize a new branch trace function segment. */
Something like:
+/* If both FUN and MFUN are NULL it is marker of the end of trace. */
>
> -static VEC (btrace_func_s) *
> -compute_ftrace (VEC (btrace_inst_s) *itrace)
> +static struct btrace_function *
> +ftrace_new_function (struct btrace_function *prev,
> + struct minimal_symbol *mfun,
> + struct symbol *fun)
> {
> - VEC (btrace_func_s) *ftrace;
> - struct btrace_inst *binst;
> - struct btrace_func *bfun;
> - unsigned int idx;
> + struct btrace_function *bfun;
>
> - DEBUG ("compute ftrace");
> + bfun = xzalloc (sizeof (*bfun));
>
> - ftrace = NULL;
> - bfun = NULL;
> + bfun->msym = mfun;
> + bfun->sym = fun;
bfun->msym == NULL && bfun->sym == NULL is not allowed according to the
comments in struct btrace_function. It should be gdb_assert-ed here.
> + bfun->lbegin = INT_MAX;
It would be nice to comment here or at LBEGIN what does INT_MAX mean for it.
> + bfun->flow.prev = prev;
>
> - for (idx = 0; VEC_iterate (btrace_inst_s, itrace, idx, binst); ++idx)
> + if (prev != NULL)
> {
> - struct symtab_and_line sal;
> - struct bound_minimal_symbol mfun;
> - struct symbol *fun;
> - const char *filename;
> + gdb_assert (prev->flow.next == NULL);
> + prev->flow.next = bfun;
> +
> + bfun->number = prev->number + 1;
> + bfun->insn_offset = prev->insn_offset
> + + VEC_length (btrace_insn_s, prev->insn);
GNU Coding Standards require multi-line expressions to use parentheses,
therefore:
bfun->insn_offset = (prev->insn_offset
+ VEC_length (btrace_insn_s, prev->insn));
> + }
> +
> + return bfun;
> +}
> +
> +/* Update the UP field of a function segment. */
> +
> +static void
> +ftrace_update_caller (struct btrace_function *bfun,
> + struct btrace_function *caller)
> +{
> + if (bfun->up != NULL)
> + ftrace_debug (bfun, "updating caller");
> +
> + bfun->up = caller;
> +
> + ftrace_debug (bfun, "set caller");
> +}
> +
> +/* Fix up the caller for a function segment. */
> +
> +static void
> +ftrace_fixup_caller (struct btrace_function *bfun,
> + struct btrace_function *caller)
> +{
> + struct btrace_function *prev, *next;
> +
> + ftrace_update_caller (bfun, caller);
> +
> + /* Update all function segments belonging to the same function. */
> + for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
> + ftrace_update_caller (prev, caller);
> +
> + for (next = bfun->segment.next; next != NULL; next = next->segment.next)
> + ftrace_update_caller (next, caller);
> +}
> +
> +/* Add a new function segment for a call. */
> +
> +static struct btrace_function *
> +ftrace_new_call (struct btrace_function *caller,
> + struct minimal_symbol *mfun,
> + struct symbol *fun)
> +{
> + struct btrace_function *bfun;
> +
> + bfun = ftrace_new_function (caller, mfun, fun);
> + bfun->up = caller;
> + bfun->level = caller->level + 1;
> +
> + ftrace_debug (bfun, "new call");
> +
> + return bfun;
> +}
> +
> +/* Add a new function segment for a tail call. */
> +
> +static struct btrace_function *
> +ftrace_new_tailcall (struct btrace_function *caller,
> + struct minimal_symbol *mfun,
> + struct symbol *fun)
> +{
> + struct btrace_function *bfun;
> +
> + bfun = ftrace_new_function (caller, mfun, fun);
> + bfun->up = caller;
> + bfun->level = caller->level + 1;
> + bfun->flags |= bfun_up_links_to_tailcall;
> +
> + ftrace_debug (bfun, "new tail call");
> +
> + return bfun;
> +}
> +
> +/* Find the caller of BFUN.
> + This is the first function segment up the call stack from BFUN with
> + MFUN/FUN symbol information. */
Maybe if you find appropriate:
+ /* Try to find chronologically previous execution segment of the MFUN/FUN
function before the MFUN/FUN function did call BFUN. */
> +
> +static struct btrace_function *
> +ftrace_find_caller (struct btrace_function *bfun,
> + struct minimal_symbol *mfun,
> + struct symbol *fun)
> +{
> + for (; bfun != NULL; bfun = bfun->up)
> + {
> + /* Skip functions with incompatible symbol information. */
> + if (ftrace_function_switched (bfun, mfun, fun))
> + continue;
> +
> + /* This is the function segment we're looking for. */
> + break;
> + }
> +
> + return bfun;
> +}
> +
> +/* Find the last actual call in the back trace of BFUN. */
+ /* Generally skip any segments ending just with a jump. */
> +
> +static struct btrace_function *
> +ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
> +{
> + if (!gdbarch_insn_call_p_p (gdbarch))
> + return NULL;
> +
> + for (; bfun != NULL; bfun = bfun->up)
> + {
> + struct btrace_insn *last;
> CORE_ADDR pc;
>
> - pc = binst->pc;
> + if (VEC_empty (btrace_insn_s, bfun->insn))
> + continue;
Shouldn't here be rather "return NULL" instead of continue? I do not
understand in which cases BFUN->INSN can be empty. Empty INSN case could be
also described in btrace_function->insn.
> +
> + last = VEC_last (btrace_insn_s, bfun->insn);
> + pc = last->pc;
> +
> + if (gdbarch_insn_call_p (gdbarch, pc))
> + break;
> + }
> +
> + return bfun;
> +}
> +
> +/* Add a new function segment for a return. */
It needs to comment the meaning of PREV, MFUN and FUN.
I was also missing here description of what this function does, not sure if
maybe helpful:
+ /* Connect the new execution segment BFUN with the previously executing
segment of the same function at the same caller level. */
> +
> +static struct btrace_function *
> +ftrace_new_return (struct gdbarch *gdbarch,
> + struct btrace_function *prev,
> + struct minimal_symbol *mfun,
> + struct symbol *fun)
> +{
> + struct btrace_function *bfun, *caller;
> +
> + bfun = ftrace_new_function (prev, mfun, fun);
> +
> + /* It is important to start at PREV's caller. Otherwise, we might find
> + PREV itself, if PREV is a recursive function. */
> + caller = ftrace_find_caller (prev->up, mfun, fun);
> + if (caller != NULL)
> + {
> + /* The caller of PREV is the preceding btrace function segment in this
> + function instance. */
> + gdb_assert (caller->segment.next == NULL);
> +
> + caller->segment.next = bfun;
> + bfun->segment.prev = caller;
> +
> + /* Maintain the function level. */
> + bfun->level = caller->level;
>
> - /* Try to determine the function we're in. We use both types of symbols
> - to avoid surprises when we sometimes get a full symbol and sometimes
> - only a minimal symbol. */
> - fun = find_pc_function (pc);
> - mfun = lookup_minimal_symbol_by_pc (pc);
> + /* Maintain the call stack. */
> + bfun->up = caller->up;
>
> - if (fun == NULL && mfun.minsym == NULL)
> + ftrace_debug (bfun, "new return");
> + }
> + else
> + {
> + /* We did not find a caller. This could mean that something went
> + wrong or that the call is simply not included in the trace. */
> +
> + /* Let's search for some actual call. */
> + caller = ftrace_find_call (gdbarch, prev->up);
> + if (caller == NULL)
> {
> - DEBUG_FTRACE ("no symbol at %u, pc=%s", idx,
> - core_addr_to_string_nz (pc));
> - continue;
> - }
> + /* There is no call in PREV's back trace. We assume that the
> + branch trace did not include it. */
> +
> + /* Let's find the topmost call function - this skips tail calls. */
> + while (prev->up != NULL)
> + prev = prev->up;
>
> - /* If we're switching functions, we start over. */
> - if (ftrace_function_switched (bfun, mfun.minsym, fun))
> + /* We maintain levels for a series of returns for which we have
> + not seen the calls, but we restart at level 0, otherwise. */
> + bfun->level = min (0, prev->level) - 1;
> +
> + /* Fix up the call stack for PREV. */
> + ftrace_fixup_caller (prev, bfun);
> + prev->flags |= bfun_up_links_to_ret;
> +
> + ftrace_debug (bfun, "new return - no caller");
> + }
> + else
> {
> - bfun = VEC_safe_push (btrace_func_s, ftrace, NULL);
> + /* There is a call in PREV's back trace to which we should have
> + returned. Let's remain at this level. */
> + bfun->level = prev->level;
>
> - ftrace_init_func (bfun, mfun.minsym, fun, idx);
> - ftrace_debug (bfun, "init");
> + ftrace_debug (bfun, "new return - unknown caller");
> }
> + }
> +
> + return bfun;
> +}
> +
> +/* Add a new function segment for a function switch. */
Describe what a "switch" is and its relationship to MFUN/FUN.
Drop the unused parameter INSN.
> +
> +static struct btrace_function *
> +ftrace_new_switch (struct btrace_function *prev,
> + struct minimal_symbol *mfun,
> + struct symbol *fun,
> + const struct btrace_insn *insn)
> +{
> + struct btrace_function *bfun;
> +
> + /* This is an unexplained function switch. The call stack will likely
> + be wrong at this point. */
> + bfun = ftrace_new_function (prev, mfun, fun);
> +
> + /* We keep the function level. */
> + bfun->level = prev->level;
> +
> + ftrace_debug (bfun, "new switch");
>
> - /* Update the instruction range. */
> - bfun->iend = idx;
> - ftrace_debug (bfun, "update insns");
> + return bfun;
> +}
> +
> +/* Update the branch trace function segment. Never returns NULL. */
What is the meaning of BFUN and PC? What and why does it update?
I would also call it more like "add", it creates new records.
> +
> +static struct btrace_function *
> +ftrace_update_function (struct gdbarch *gdbarch,
> + struct btrace_function *bfun, CORE_ADDR pc)
> +{
> + struct bound_minimal_symbol bmfun;
> + struct minimal_symbol *mfun;
> + struct symbol *fun;
> + struct btrace_insn *last;
> +
> + /* Try to determine the function we're in. We use both types of symbols
> + to avoid surprises when we sometimes get a full symbol and sometimes
> + only a minimal symbol. */
> + fun = find_pc_function (pc);
> + bmfun = lookup_minimal_symbol_by_pc (pc);
> + mfun = bmfun.minsym;
> +
> + if (fun == NULL && mfun == NULL)
> + DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
> +
> + /* If we didn't have a function before, we create one. */
> + if (bfun == NULL)
> + return ftrace_new_function (bfun, mfun, fun);
mfun == NULL && fun == NULL is not allowed according to the comments in struct
btrace_function. Already suggested gdb_assert for ftrace_new_function but it
seems it may happen here.
> +
> + /* Check the last instruction, if we have one.
> + We do this check first, since it allows us to fill in the call stack
> + links in addition to the normal flow links. */
> + last = NULL;
> + if (!VEC_empty (btrace_insn_s, bfun->insn))
> + last = VEC_last (btrace_insn_s, bfun->insn);
>
> - /* Let's see if we have source correlation, as well. */
> - sal = find_pc_line (pc, 0);
> - if (sal.symtab == NULL || sal.line == 0)
> + if (last != NULL)
This conditional is excessive, it is true iff !VEC_empty is true above.
> + {
> + CORE_ADDR lpc;
> +
> + lpc = last->pc;
> +
> + /* Check for returns. */
> + if (gdbarch_insn_ret_p_p (gdbarch) && gdbarch_insn_ret_p (gdbarch, lpc))
> + return ftrace_new_return (gdbarch, bfun, mfun, fun);
> +
> + /* Check for calls. */
> + if (gdbarch_insn_call_p_p (gdbarch) && gdbarch_insn_call_p (gdbarch, lpc))
> {
> - DEBUG_FTRACE ("no lines at %u, pc=%s", idx,
> - core_addr_to_string_nz (pc));
> - continue;
> + int size;
> +
> + size = gdb_insn_length (gdbarch, lpc);
> +
> + /* Ignore calls to the next instruction. They are used for PIC. */
> + if (lpc + size != pc)
> + return ftrace_new_call (bfun, mfun, fun);
> }
> + }
> +
> + /* Check if we're switching functions for some other reason. */
> + if (ftrace_function_switched (bfun, mfun, fun))
> + {
> + DEBUG_FTRACE ("switching from %s in %s at %s",
> + ftrace_print_insn_addr (last),
> + ftrace_print_function_name (bfun),
> + ftrace_print_filename (bfun));
>
> - /* Check if we switched files. This could happen if, say, a macro that
> - is defined in another file is expanded here. */
> - filename = symtab_to_fullname (sal.symtab);
> - if (ftrace_skip_file (bfun, filename))
> + if (last != NULL)
> {
> - DEBUG_FTRACE ("ignoring file at %u, pc=%s, file=%s", idx,
> - core_addr_to_string_nz (pc), filename);
> - continue;
> + CORE_ADDR start, lpc;
> +
> + /* If we have symbol information for our current location, use
> + it to check that we jump to the start of a function. */
> + if (fun != NULL || mfun != NULL)
> + start = get_pc_function_start (pc);
> + else
> + start = pc;
This goes into implementation detail of get_pc_function_start. Rather always
call get_pc_function_start but one should check if it failed anyway
- get_pc_function_start returns 0 if it has failed.
Or was the 'fun != NULL || mfun != NULL' check there for performance reasons?
> +
> + lpc = last->pc;
> +
> + /* Jumps indicate optimized tail calls. */
> + if (start == pc
> + && gdbarch_insn_jump_p_p (gdbarch)
> + && gdbarch_insn_jump_p (gdbarch, lpc))
> + return ftrace_new_tailcall (bfun, mfun, fun);
Cannot a plain intra-function jump be confused into a tailcall due to
a stripped binary? FUN and MFUN are NULL for a stripped binary,
therefore "start = pc;" gets assigned above, which will call
ftrace_new_tailcall. I did not try to reproduce it, though.
> }
>
> - /* Update the line range. */
> - bfun->lbegin = min (bfun->lbegin, sal.line);
> - bfun->lend = max (bfun->lend, sal.line);
> - ftrace_debug (bfun, "update lines");
> + return ftrace_new_switch (bfun, mfun, fun, last);
> + }
> +
> + return bfun;
> +}
> +
> +/* Update the source correlation for a branch trace function segment. */
> +
> +static void
> +ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
> +{
> + struct symtab_and_line sal;
> + const char *filename;
Please rename the variable to "fullname", it should be used together with
symtab_to_fullname and not with symtab_to_filename_for_display.
> +
> + sal = find_pc_line (pc, 0);
> + if (sal.symtab == NULL || sal.line == 0)
> + {
> + DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
> + return;
> + }
> +
> + /* Check if we switched files. This could happen if, say, a macro that
> + is defined in another file is expanded here. */
> + filename = symtab_to_fullname (sal.symtab);
> + if (ftrace_skip_file (bfun, filename))
> + {
> + DEBUG_FTRACE ("ignoring file at %s, file=%s",
> + core_addr_to_string_nz (pc), filename);
> + return;
> }
>
> - return ftrace;
> + /* Update the line range. */
> + bfun->lbegin = min (bfun->lbegin, sal.line);
> + bfun->lend = max (bfun->lend, sal.line);
> +
> + if (record_debug > 1)
> + ftrace_debug (bfun, "update lines");
> +}
> +
> +/* Update the instructions for a branch trace function segment. */
It is more appropriate to call it "add", "append" or similar.
> +
> +static void
> +ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
> +{
> + struct btrace_insn *insn;
> +
> + insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
> + insn->pc = pc;
> +
> + if (record_debug > 1)
> + ftrace_debug (bfun, "update insn");
> +}
> +
> +/* Compute the function branch trace. */
Describe the parameters.
> +
> +static void
> +btrace_compute_ftrace (struct btrace_thread_info *btinfo,
> + VEC (btrace_block_s) *btrace)
> +{
> + struct btrace_function *begin, *end;
> + struct gdbarch *gdbarch;
> + unsigned int blk;
> + int level;
> +
> + DEBUG ("compute ftrace");
> +
> + gdbarch = target_gdbarch ();
> + begin = NULL;
> + end = NULL;
> + level = INT_MAX;
> + blk = VEC_length (btrace_block_s, btrace);
> +
> + while (blk != 0)
> + {
> + btrace_block_s *block;
> + CORE_ADDR pc;
> +
> + blk -= 1;
> +
> + block = VEC_index (btrace_block_s, btrace, blk);
> + pc = block->begin;
> +
> + for (;;)
> + {
> + int size;
> +
> + /* We should hit the end of the block. Warn if we went too far. */
> + if (block->end < pc)
> + {
> + warning (_("Recorded trace may be corrupted."));
One could also print BEGIN and END to be more suggestive what could break.
> + break;
> + }
> +
> + end = ftrace_update_function (gdbarch, end, pc);
> + if (begin == NULL)
> + begin = end;
> +
> + /* Maintain the function level offset. */
> + level = min (level, end->level);
> +
> + ftrace_update_insns (end, pc);
> + ftrace_update_lines (end, pc);
> +
> + /* We're done once we pushed the instruction at the end. */
> + if (block->end == pc)
> + break;
> +
> + size = gdb_insn_length (gdbarch, pc);
> +
> + /* Make sure we terminate if we fail to compute the size. */
> + if (size <= 0)
> + {
> + warning (_("Recorded trace may be incomplete."));
One could also print PC to be more suggestive what could break.
> + break;
> + }
> +
> + pc += size;
> + }
> + }
> +
> + /* Add an empty dummy function to mark the end of the branch trace. */
> + end = ftrace_new_function (end, NULL, NULL);
> +
> + btinfo->begin = begin;
> + btinfo->end = end;
> +
> + /* LEVEL is the minimal function level of all btrace function segments.
> + Define the global level offset to -LEVEL so all function levels are
> + normalized to start at zero. */
> + btinfo->level = -level;
> }
>
> /* See btrace.h. */
> @@ -394,6 +704,7 @@ btrace_fetch (struct thread_info *tp)
> {
> struct btrace_thread_info *btinfo;
> VEC (btrace_block_s) *btrace;
> + struct cleanup *cleanup;
>
> DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
>
> @@ -402,18 +713,15 @@ btrace_fetch (struct thread_info *tp)
> return;
>
> btrace = target_read_btrace (btinfo->target, btrace_read_new);
> - if (VEC_empty (btrace_block_s, btrace))
> - return;
> -
> - btrace_clear (tp);
> + cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
>
> - btinfo->btrace = btrace;
> - btinfo->itrace = compute_itrace (btinfo->btrace);
> - btinfo->ftrace = compute_ftrace (btinfo->itrace);
> + if (!VEC_empty (btrace_block_s, btrace))
> + {
> + btrace_clear (tp);
> + btrace_compute_ftrace (btinfo, btrace);
> + }
If BTRACE is empty shouldn't TP's btrace be still cleared?
>
> - /* Initialize branch trace iterators. */
> - btrace_init_insn_iterator (btinfo);
> - btrace_init_func_iterator (btinfo);
> + do_cleanups (cleanup);
> }
>
> /* See btrace.h. */
> @@ -422,18 +730,29 @@ void
> btrace_clear (struct thread_info *tp)
> {
> struct btrace_thread_info *btinfo;
> + struct btrace_function *it, *trash;
>
> DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
>
> btinfo = &tp->btrace;
>
> - VEC_free (btrace_block_s, btinfo->btrace);
> - VEC_free (btrace_inst_s, btinfo->itrace);
> - VEC_free (btrace_func_s, btinfo->ftrace);
> + it = btinfo->begin;
> + while (it != NULL)
> + {
> + trash = it;
> + it = it->flow.next;
> +
> + xfree (trash);
> + }
> +
> + btinfo->begin = NULL;
> + btinfo->end = NULL;
>
> - btinfo->btrace = NULL;
> - btinfo->itrace = NULL;
> - btinfo->ftrace = NULL;
> + xfree (btinfo->insn_history);
> + xfree (btinfo->call_history);
> +
> + btinfo->insn_history = NULL;
> + btinfo->call_history = NULL;
> }
>
> /* See btrace.h. */
> @@ -541,3 +860,301 @@ parse_xml_btrace (const char *buffer)
>
> return btrace;
> }
> +
> +/* See btrace.h. */
> +
> +const struct btrace_insn *
> +btrace_insn_get (const struct btrace_insn_iterator *it)
> +{
> + struct btrace_function *function;
> + unsigned int index, end;
> +
> + if (it == NULL)
> + return NULL;
> +
> + index = it->index;
> + function = it->function;
> + if (function == NULL)
> + return NULL;
> +
> + end = VEC_length (btrace_insn_s, function->insn);
> + if (end == 0)
> + return NULL;
> +
> + gdb_assert (index < end);
> +
> + return VEC_index (btrace_insn_s, function->insn, index);
> +}
> +
> +/* See btrace.h. */
> +
> +unsigned int
> +btrace_insn_number (const struct btrace_insn_iterator *it)
> +{
> + struct btrace_function *function;
> +
> + if (it == NULL)
> + return 0;
> +
> + function = it->function;
> + if (function == NULL)
> + return 0;
> +
> + return function->insn_offset + it->index;
> +}
> +
> +/* See btrace.h. */
> +
> +void
> +btrace_insn_begin (struct btrace_insn_iterator *it,
> + struct btrace_thread_info *btinfo)
> +{
> + struct btrace_function *begin;
> +
> + begin = btinfo->begin;
> + if (begin == NULL)
> + error (_("No trace."));
> +
> + it->function = begin;
> + it->index = 0;
> +}
> +
> +/* See btrace.h. */
> +
> +void
> +btrace_insn_end (struct btrace_insn_iterator *it,
> + struct btrace_thread_info *btinfo)
> +{
> + struct btrace_function *end;
> +
> + end = btinfo->end;
> + if (end == NULL)
> + error (_("No trace."));
> +
> + /* The last function is an empty dummy. */
> + it->function = end;
> + it->index = 0;
> +}
> +
> +/* See btrace.h. */
> +
> +unsigned int
> +btrace_insn_next (struct btrace_insn_iterator * it, unsigned int stride)
> +{
> + struct btrace_function *function;
> + unsigned int index, end, space, adv, steps;
Some of the declarations like 'end' and 'adv' can be moved to the more inner
block.
> +
> + if (it == NULL)
> + return 0;
> +
> + function = it->function;
> + if (function == NULL)
> + return 0;
> +
> + steps = 0;
> + index = it->index;
> +
> + while (stride != 0)
> + {
> + end = VEC_length (btrace_insn_s, function->insn);
> +
> + /* Compute the number of instructions remaining in this segment. */
> + gdb_assert ((end == 0 && index == 0) || index < end);
> + space = end - index;
> +
> + /* Advance the iterator as far as possible within this segment. */
> + adv = min (space, stride);
> + stride -= adv;
> + index += adv;
> + steps += adv;
> +
> + /* Move to the next function if we're at the end of this one. */
> + if (index == end)
> + {
> + struct btrace_function *next;
> +
> + next = function->flow.next;
> + if (next == NULL)
> + {
> + /* We stepped past the last function - an empty dummy. */
> + gdb_assert (adv == 0);
> + break;
> + }
> +
> + /* We now point to the first instruction in the new function. */
> + function = next;
> + index = 0;
> + }
> +
> + /* We did make progress. */
> + gdb_assert (adv > 0);
> + }
> +
> + /* Update the iterator. */
> + it->function = function;
> + it->index = index;
> +
> + return steps;
> +}
> +
> +/* See btrace.h. */
> +
> +unsigned int
> +btrace_insn_prev (struct btrace_insn_iterator * it, unsigned int stride)
> +{
> + struct btrace_function *function;
> + unsigned int index, adv, steps;
Some of the declarations like 'end' and 'adv' can be moved to the more inner
block.
> +
> + if (it == NULL)
> + return 0;
> +
> + function = it->function;
> + if (function == NULL)
> + return 0;
> +
> + steps = 0;
> + index = it->index;
> +
> + while (stride != 0)
> + {
> + /* Move to the previous function if we're at the start of this one. */
> + if (index == 0)
> + {
> + struct btrace_function *prev;
> +
> + prev = function->flow.prev;
> + if (prev == NULL)
> + break;
> +
> + /* We point to one after the last instruction in the new function. */
> + function = prev;
> + index = VEC_length (btrace_insn_s, function->insn);
> +
> + /* There is at least one instruction in this function segment. */
> + gdb_assert (index > 0);
> + }
> +
> + /* Advance the iterator as far as possible within this segment. */
> + adv = min (index, stride);
> + stride -= adv;
> + index -= adv;
> + steps += adv;
> +
> + /* We did make progress. */
> + gdb_assert (adv > 0);
> + }
> +
> + /* Update the iterator. */
> + it->function = function;
> + it->index = index;
> +
> + return steps;
> +}
> +
> +/* See btrace.h. */
> +
> +int
> +btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
> + const struct btrace_insn_iterator *rhs)
> +{
> + unsigned int lnum, rnum;
> +
> + lnum = btrace_insn_number (lhs);
> + rnum = btrace_insn_number (rhs);
> +
> + return (int) (lnum - rnum);
> +}
> +
> +/* See btrace.h. */
> +
> +int
> +btrace_find_insn_by_number (struct btrace_insn_iterator *it,
> + const struct btrace_thread_info *btinfo,
> + unsigned int number)
> +{
> + struct btrace_function *bfun;
> + unsigned int last;
> +
> + for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
> + if (bfun->insn_offset <= number)
> + break;
> +
> + if (bfun == NULL)
> + return 0;
> +
> + last = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
I do not find 'last' to be the right name; the number is one past the last
element.
> + if (last <= number)
> + return 0;
> +
> + it->function = bfun;
> + it->index = number - bfun->insn_offset;
> +
> + return 1;
> +}
> +
> +/* See btrace.h. */
> +
> +struct btrace_function *
> +btrace_find_function_by_number (const struct btrace_thread_info *btinfo,
> + unsigned int number)
> +{
> + struct btrace_function *bfun;
> +
> + if (btinfo == NULL)
> + return NULL;
> +
> + for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
> + {
> + unsigned int bnum;
> +
> + bnum = bfun->number;
> + if (number == bnum)
> + return bfun;
> +
> + /* Functions are ordered and numbered consecutively. We could bail out
> + earlier. On the other hand, it is very unlikely that we search for
> + a nonexistent function. */
> + }
> +
> + return NULL;
> +}
> +
> +/* See btrace.h. */
> +
> +void
> +btrace_set_insn_history (struct btrace_thread_info *btinfo,
> + struct btrace_insn_iterator *begin,
> + struct btrace_insn_iterator *end)
> +{
> + struct btrace_insn_history *history;
> +
> + history = btinfo->insn_history;
> + if (history == NULL)
> + {
> + history = xzalloc (sizeof (*history));
> + btinfo->insn_history = history;
> + }
This seems slightly prone to errors to me, why not:
if (btinfo->insn_history == NULL)
btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
history = btinfo->insn_history;
> +
> + history->begin = *begin;
> + history->end = *end;
> +}
> +
> +/* See btrace.h. */
> +
> +void
> +btrace_set_call_history (struct btrace_thread_info *btinfo,
> + struct btrace_function *begin,
> + struct btrace_function *end)
> +{
> + struct btrace_call_history *history;
> +
> + history = btinfo->call_history;
> + if (history == NULL)
> + {
> + history = xzalloc (sizeof (*history));
> + btinfo->call_history = history;
> + }
Likewise.
> +
> + history->begin = begin;
> + history->end = end;
> +}
> diff --git a/gdb/btrace.h b/gdb/btrace.h
> index bd8425d..ac7acdb 100644
> --- a/gdb/btrace.h
> +++ b/gdb/btrace.h
> @@ -29,63 +29,106 @@
> #include "btrace-common.h"
>
> struct thread_info;
> +struct btrace_function;
>
> /* A branch trace instruction.
>
> This represents a single instruction in a branch trace. */
> -struct btrace_inst
> +struct btrace_insn
> {
> /* The address of this instruction. */
> CORE_ADDR pc;
> };
>
> -/* A branch trace function.
> +/* A vector of branch trace instructions. */
> +typedef struct btrace_insn btrace_insn_s;
> +DEF_VEC_O (btrace_insn_s);
> +
> +/* A doubly-linked list of branch trace function segments. */
> +struct btrace_func_link
> +{
> + struct btrace_function *prev;
> + struct btrace_function *next;
> +};
> +
> +/* Flags for btrace function segments. */
> +enum btrace_function_flag
> +{
> + /* The 'up' link interpretation.
> + If set, it points to the function segment we returned to.
> + If clear, it points to the function segment we called from. */
> + bfun_up_links_to_ret = (1 << 0),
> +
> + /* The 'up' link points to a tail call. This obviously only makes sense
> + if bfun_up_links_to_ret is clear. */
> + bfun_up_links_to_tailcall = (1 << 1)
enum values are uppercased in GDB.
But I do not see a reason to have the flags field and this enum. You can just
use something like
unsigned bfun_up_links_to_ret : 1;
unsigned bfun_up_links_to_tailcall : 1;
instead of the flags field in struct btrace_function. Sometimes one wants to
pass FLAGS as a function parameter but that is not the case here.
> +};
> +
> +/* A branch trace function segment.
>
> This represents a function segment in a branch trace, i.e. a consecutive
> number of instructions belonging to the same function. */
> -struct btrace_func
> +struct btrace_function
> {
> /* The full and minimal symbol for the function. One of them may be NULL. */
IIUC from the code rather:
+/* Iff both FUN and MFUN are NULL it is marker of the end of trace. */
> struct minimal_symbol *msym;
> struct symbol *sym;
>
> + /* The previous and next segment belonging to the same function. */
> + struct btrace_func_link segment;
> +
> + /* The previous and next function in control flow order. */
> + struct btrace_func_link flow;
> +
> + /* The directly preceding function segment in a (fake) call stack. */
> + struct btrace_function *up;
> +
> + /* The instructions in this function segment. */
> + VEC (btrace_insn_s) *insn;
> +
> + /* The instruction number offset for the first instruction in this
> + function segment. */
> + unsigned int insn_offset;
> +
> + /* The function number. */
It should have some better description; it may also help to note that it is
ordered according to the 'flow' pointers.
> + unsigned int number;
> +
> + /* The function level. */
+ /* Callee has LEVEL value 1 higher than its caller. */
Also some comment how it is relative to btrace_thread_info>level.
> + int level;
> +
> /* The source line range of this function segment (both inclusive). */
> int lbegin, lend;
>
> - /* The instruction number range in the instruction trace corresponding
> - to this function segment (both inclusive). */
> - unsigned int ibegin, iend;
> + /* A bit-vector of btrace_function_flag. */
> + unsigned int flags;
> };
>
> -/* Branch trace may also be represented as a vector of:
> -
> - - branch trace instructions starting with the oldest instruction.
> - - branch trace functions starting with the oldest function. */
> -typedef struct btrace_inst btrace_inst_s;
> -typedef struct btrace_func btrace_func_s;
> +/* A branch trace instruction iterator. */
> +struct btrace_insn_iterator
> +{
> + /* The branch trace function segment containing the instruction. */
> + struct btrace_function *function;
>
> -/* Define functions operating on branch trace vectors. */
> -DEF_VEC_O (btrace_inst_s);
> -DEF_VEC_O (btrace_func_s);
> + /* The index into the function segment's instruction vector. */
> + unsigned int index;
> +};
>
> /* Branch trace iteration state for "record instruction-history". */
> -struct btrace_insn_iterator
> +struct btrace_insn_history
> {
> - /* The instruction index range from begin (inclusive) to end (exclusive)
> - that has been covered last time.
> - If end < begin, the branch trace has just been updated. */
> - unsigned int begin;
> - unsigned int end;
> + /* The branch trace instruction range from begin (inclusive) to
> + end (exclusive) that has been covered last time. */
> + struct btrace_insn_iterator begin;
> + struct btrace_insn_iterator end;
> };
>
> /* Branch trace iteration state for "record function-call-history". */
> -struct btrace_func_iterator
> +struct btrace_call_history
> {
> - /* The function index range from begin (inclusive) to end (exclusive)
> - that has been covered last time.
> - If end < begin, the branch trace has just been updated. */
> - unsigned int begin;
> - unsigned int end;
> + /* The branch trace function range from begin (inclusive) to end (exclusive)
> + that has been covered last time. */
> + struct btrace_function *begin;
> + struct btrace_function *end;
> };
>
> /* Branch trace information per thread.
> @@ -104,15 +147,19 @@ struct btrace_thread_info
> struct btrace_target_info *target;
>
> /* The current branch trace for this thread. */
> - VEC (btrace_block_s) *btrace;
> - VEC (btrace_inst_s) *itrace;
> - VEC (btrace_func_s) *ftrace;
> + struct btrace_function *begin;
> + struct btrace_function *end;
Note inclusivity/exclusivity. Maybe END always points to the dummy
MFUN==FUN==NULL record? May BEGIN be NULL (and if it is, is END then
automatically NULL as well)?
> +
> + /* The function level offset. When added to each function's level,
> + this normalizes the function levels such that the smallest level
> + becomes zero. */
> + int level;
>
> /* The instruction history iterator. */
> - struct btrace_insn_iterator insn_iterator;
> + struct btrace_insn_history *insn_history;
>
> /* The function call history iterator. */
> - struct btrace_func_iterator func_iterator;
> + struct btrace_call_history *call_history;
> };
>
> /* Enable branch tracing for a thread. */
> @@ -139,4 +186,60 @@ extern void btrace_free_objfile (struct objfile *);
> /* Parse a branch trace xml document into a block vector. */
> extern VEC (btrace_block_s) *parse_xml_btrace (const char*);
>
> +/* Dereference a branch trace instruction iterator. Return a pointer to the
> + instruction the iterator points to or NULL if the interator does not point
> + to a valid instruction. */
> +extern const struct btrace_insn *
> +btrace_insn_get (const struct btrace_insn_iterator *);
The indentation should be:
extern const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *);
so that various tools do not consider it a function definition.
> +
> +/* Return the instruction number for a branch trace iterator. Returns zero
s/the instruction number/index of the instruction relative to start of the
function/
> + if the iterator does not point to a valid instruction. */
> +extern unsigned int btrace_insn_number (const struct btrace_insn_iterator *);
> +
> +/* Initialize a branch trace instruction iterator to point to the begin/end of
> + the branch trace. Throws an error if there is no branch trace. */
> +extern void btrace_insn_begin (struct btrace_insn_iterator *,
> + struct btrace_thread_info *);
> +extern void btrace_insn_end (struct btrace_insn_iterator *,
> + struct btrace_thread_info *);
> +
> +/* Increment/decrement a branch trace instruction iterator. Return the number
> + of instructions by which the instruction iterator has been advanced.
> + Returns zero, if the operation failed. */
> +extern unsigned int btrace_insn_next (struct btrace_insn_iterator *,
> + unsigned int stride);
> +extern unsigned int btrace_insn_prev (struct btrace_insn_iterator *,
> + unsigned int stride);
> +
> +/* Compare two branch trace instruction iterators.
> + Return a negative number if LHS < RHS.
> + Return zero if LHS == RHS.
> + Return a positive number if LHS > RHS. */
> +extern int btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
> + const struct btrace_insn_iterator *rhs);
> +
> +/* Find an instruction in the function branch trace by its number.
> + If the instruction is found, initialize the branch trace instruction
> + iterator to point to this instruction and return 1.
> + Return 0, otherwise. */
> +extern int btrace_find_insn_by_number (struct btrace_insn_iterator *,
> + const struct btrace_thread_info *,
> + unsigned int number);
> +
> +/* Find a function in the function branch trace by its number.
> + Return a pointer to that function or NULL if no such function is found. */
> +extern struct btrace_function *
> +btrace_find_function_by_number (const struct btrace_thread_info *,
> + unsigned int number);
The indentation should be:
extern struct btrace_function *
btrace_find_function_by_number (const struct btrace_thread_info *,
unsigned int number);
so that various tools do not consider it a function definition.
> +
> +/* Set the branch trace instruction history to [BEGIN; END). */
Please use words inclusive/exclusive instead of the parentheses types.
> +extern void btrace_set_insn_history (struct btrace_thread_info *,
> + struct btrace_insn_iterator *begin,
> + struct btrace_insn_iterator *end);
> +
> +/* Set the branch trace function call history to [BEGIN; END). */
Please use words inclusive/exclusive instead of the parentheses types.
> +extern void btrace_set_call_history (struct btrace_thread_info *,
> + struct btrace_function *begin,
> + struct btrace_function *end);
> +
> #endif /* BTRACE_H */
> diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
> index 8fb413e..e2506a8 100644
> --- a/gdb/record-btrace.c
> +++ b/gdb/record-btrace.c
> @@ -74,7 +74,7 @@ require_btrace (void)
>
> btinfo = &tp->btrace;
>
> - if (VEC_empty (btrace_inst_s, btinfo->itrace))
> + if (btinfo->begin == NULL)
> error (_("No trace."));
>
> return btinfo;
> @@ -205,6 +205,7 @@ static void
> record_btrace_info (void)
> {
> struct btrace_thread_info *btinfo;
> + struct btrace_function *bfun;
> struct thread_info *tp;
> unsigned int insts, funcs;
>
> @@ -217,8 +218,15 @@ record_btrace_info (void)
> btrace_fetch (tp);
>
> btinfo = &tp->btrace;
> - insts = VEC_length (btrace_inst_s, btinfo->itrace);
> - funcs = VEC_length (btrace_func_s, btinfo->ftrace);
> + bfun = btinfo->end;
> + insts = 0;
nitpick: You could call it 'insns' when standardizing on the 'insn'
abbreviation (instead of previous inst).
> + funcs = 0;
> +
> + if (bfun != NULL)
> + {
> + funcs = bfun->number;
> + insts = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
If BFUN always points to the last MFUN==FUN==NULL marker maybe we should
rather gdb_assert (VEC_empty (btrace_insn_s, bfun->insn));
> + }
>
> printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
> "%d (%s).\n"), insts, funcs, tp->num,
> @@ -236,27 +244,32 @@ ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
> /* Disassemble a section of the recorded instruction trace. */
>
> static void
> -btrace_insn_history (struct btrace_thread_info *btinfo, struct ui_out *uiout,
> - unsigned int begin, unsigned int end, int flags)
> +btrace_insn_history (struct ui_out *uiout,
> + const struct btrace_insn_iterator *begin,
> + const struct btrace_insn_iterator *end, int flags)
> {
> struct gdbarch *gdbarch;
> - struct btrace_inst *inst;
> - unsigned int idx;
> + struct btrace_insn *inst;
Dead variable 'inst'.
> + struct btrace_insn_iterator it;
>
> - DEBUG ("itrace (0x%x): [%u; %u[", flags, begin, end);
> + DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
> + btrace_insn_number (end));
>
> gdbarch = target_gdbarch ();
>
> - for (idx = begin; VEC_iterate (btrace_inst_s, btinfo->itrace, idx, inst)
> - && idx < end; ++idx)
> + for (it = *begin; btrace_insn_cmp (&it, end) < 0; btrace_insn_next (&it, 1))
> {
> + const struct btrace_insn *insn;
> +
> + insn = btrace_insn_get (&it);
> +
> /* Print the instruction index. */
> - ui_out_field_uint (uiout, "index", idx);
> + ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
> ui_out_text (uiout, "\t");
>
> /* Disassembly with '/m' flag may not produce the expected result.
> See PR gdb/11833. */
> - gdb_disassembly (gdbarch, uiout, NULL, flags, 1, inst->pc, inst->pc + 1);
> + gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
> }
> }
>
[...]
^ permalink raw reply [flat|nested] 24+ messages in thread* RE: [PATCH 02/15] btrace: change branch trace data structure
2013-05-13 15:25 ` Jan Kratochvil
@ 2013-05-14 15:27 ` Metzger, Markus T
2013-05-14 16:11 ` Doug Evans
0 siblings, 1 reply; 24+ messages in thread
From: Metzger, Markus T @ 2013-05-14 15:27 UTC (permalink / raw)
To: Jan Kratochvil; +Cc: gdb-patches, Himpel, Christian
> -----Original Message-----
> From: Jan Kratochvil [mailto:jan.kratochvil@redhat.com]
> Sent: Monday, May 13, 2013 5:25 PM
> To: Metzger, Markus T
> Cc: gdb-patches@sourceware.org; Himpel, Christian
> Subject: Re: [PATCH 02/15] btrace: change branch trace data structure
>
> Hi Markus,
>
> as mailed off-list you have an updated version so sending only a partial
> review as I have done so far.
Thanks for your review.
[...]
> > -/* Compute the function trace from the instruction trace. */
> > +/* Allocate and initialize a new branch trace function segment. */
>
> Something like:
>
> +/* If both FUN and MFUN are NULL it is marker of the end of trace. */
I do not understand this comment.
Actually, it turns out that we can have functions without even minimal symbol
information. See testsuite/gdb.btrace/unknown_functions.exp in a later patch.
[...]
> > + bfun->insn_offset = prev->insn_offset
> > + + VEC_length (btrace_insn_s, prev->insn);
>
> GNU Coding Standards require multi-line expressions to use parentheses,
> therefore:
> bfun->insn_offset = (prev->insn_offset
> + VEC_length (btrace_insn_s, prev->insn));
Thanks. I believe there are more instances of this around.
[...]
> > + if (VEC_empty (btrace_insn_s, bfun->insn))
> > + continue;
>
> Shouldn't here be rather "return NULL" instead of continue? I do not
> understand in which cases BFUN->INSN can be empty. Empty INSN case could be
> also described in btrace_function->insn.
In the sense of this function, continue is the right thing to do. Nevertheless, we
currently do not generate such function segments and their handling is not
consistent. They were intended to allow adding artificial frames later on to
represent for example inlined functions.
I changed this into gdb_assert and added a comment to the declaration of
struct btrace_function.
[...]
> > + /* If we didn't have a function before, we create one. */
> > + if (bfun == NULL)
> > + return ftrace_new_function (bfun, mfun, fun);
>
> mfun == NULL && fun == NULL is not allowed according to the comments in struct
> btrace_function. Already suggested gdb_assert for ftrace_new_function but it
> seems it may happen here.
I updated the comment. See above for an example.
[...]
> > + /* Check the last instruction, if we have one.
> > + We do this check first, since it allows us to fill in the call stack
> > + links in addition to the normal flow links. */
> > + last = NULL;
> > + if (!VEC_empty (btrace_insn_s, bfun->insn))
> > + last = VEC_last (btrace_insn_s, bfun->insn);
> >
> > - /* Let's see if we have source correlation, as well. */
> > - sal = find_pc_line (pc, 0);
> > - if (sal.symtab == NULL || sal.line == 0)
> > + if (last != NULL)
>
> This conditional is excessive, it is true iff !VEC_empty is true above.
I still need to check either one of them again, here. Last is used also in
the if statement below....
[...]
> > + {
> > + }
> > +
> > + /* Check if we're switching functions for some other reason. */
> > + if (ftrace_function_switched (bfun, mfun, fun))
> > + {
[...]
> > + if (last != NULL)
...here.
> > {
> > - DEBUG_FTRACE ("ignoring file at %u, pc=%s, file=%s", idx,
> > - core_addr_to_string_nz (pc), filename);
> > - continue;
> > + CORE_ADDR start, lpc;
> > +
> > + /* If we have symbol information for our current location, use
> > + it to check that we jump to the start of a function. */
> > + if (fun != NULL || mfun != NULL)
> > + start = get_pc_function_start (pc);
> > + else
> > + start = pc;
>
> This goes into implementation detail of get_pc_function_start. Rather always
> call get_pc_function_start but one should check if it failed anyway
> - get_pc_function_start returns 0 if it has failed.
>
> Or was the 'fun != NULL || mfun != NULL' check there for performance reasons?
Without symbol information, we assume that we do jump to the beginning of
a function. If we called get_pc_function_start unconditionally, we wouldn't be
able to tell from a zero return whether we knew that we did not jump to the start
of a function or whether we just don't know.
We already know that we switched functions (i.e. that the symbol information
changed). I put in this PC check as another safeguard for detecting tail calls.
[...]
> > + /* Jumps indicate optimized tail calls. */
> > + if (start == pc
> > + && gdbarch_insn_jump_p_p (gdbarch)
> > + && gdbarch_insn_jump_p (gdbarch, lpc))
> > + return ftrace_new_tailcall (bfun, mfun, fun);
>
> Cannot a plain intra-function jump be confused into a tailcall due to
> a stripped binary? FUN and MFUN are NULL for stripped binary,
> therefore "start = pc;" gets assigned above, which will call
> ftrace_new_tailcall. I did not try to reproduce it, though.
Yes, but unlikely. We already had a change in symbol information, so we're
jumping from a section with symbol information to one without (or vice versa
or within a section with symbol information but for those get_pc_function_start
should return the proper address).
[...]
> > btrace = target_read_btrace (btinfo->target, btrace_read_new);
> > - if (VEC_empty (btrace_block_s, btrace))
> > - return;
> > -
> > - btrace_clear (tp);
> > + cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
> >
> > - btinfo->btrace = btrace;
> > - btinfo->itrace = compute_itrace (btinfo->btrace);
> > - btinfo->ftrace = compute_ftrace (btinfo->itrace);
> > + if (!VEC_empty (btrace_block_s, btrace))
> > + {
> > + btrace_clear (tp);
> > + btrace_compute_ftrace (btinfo, btrace);
> > + }
>
> If BTRACE is empty shouldn't TP's btrace be still cleared?
I'm calling target_read_btrace with flag btrace_read_new. It returns an
empty block trace vector if the trace has not changed. In this case, the old
trace is still correct.
[...]
> > +enum btrace_function_flag
> > +{
> > + /* The 'up' link interpretation.
> > + If set, it points to the function segment we returned to.
> > + If clear, it points to the function segment we called from. */
> > + bfun_up_links_to_ret = (1 << 0),
> > +
> > + /* The 'up' link points to a tail call. This obviously only makes sense
> > + if bfun_up_links_to_ret is clear. */
> > + bfun_up_links_to_tailcall = (1 << 1)
>
> enum values are uppercased in GDB.
It appears to me that most enumeration constants are lowercase. If you
insist, I'll make mine UPPERCASE, but I find lowercase more readable.
> But I do not see a reason to have the flags field and this enum. You can just
> use something like
> unsigned bfun_up_links_to_ret : 1;
> unsigned bfun_up_links_to_tailcall : 1;
>
> instead of the flags field in struct btrace_function. Sometimes one wants to
> pass FLAGS as a function parameter but that is not the case here.
We don't know what other flags we might need. A flags bit-vector seems more
flexible.
[...]
> > +struct btrace_function
> > {
> > /* The full and minimal symbol for the function. One of them may be NULL. */
>
> IIUC from the code rather:
>
> +/* Iff both FUN and MFUN are NULL it is marker of the end of trace. */
That changed, meanwhile. I removed the empty function as end marker and I found
examples where both are NULL.
[...]
> > +/* Return the instruction number for a branch trace iterator. Returns zero
>
> s/the instruction number/index of the instruction relative to start of the
> function/
I use the term "instruction number" in other places, as well.
[...]
> > struct gdbarch *gdbarch;
> > - struct btrace_inst *inst;
> > - unsigned int idx;
> > + struct btrace_insn *inst;
>
> Dead variable 'inst'.
Thanks. Couldn't we make gcc warn about this?
Regards,
Markus.
Intel GmbH
Dornacher Strasse 1
85622 Feldkirchen/Muenchen, Deutschland
Sitz der Gesellschaft: Feldkirchen bei Muenchen
Geschaeftsfuehrer: Christian Lamprechter, Hannes Schwaderer, Douglas Lusk
Registergericht: Muenchen HRB 47456
Ust.-IdNr./VAT Registration No.: DE129385895
Citibank Frankfurt a.M. (BLZ 502 109 00) 600119052
^ permalink raw reply [flat|nested] 24+ messages in thread* Re: [PATCH 02/15] btrace: change branch trace data structure
2013-05-14 15:27 ` Metzger, Markus T
@ 2013-05-14 16:11 ` Doug Evans
0 siblings, 0 replies; 24+ messages in thread
From: Doug Evans @ 2013-05-14 16:11 UTC (permalink / raw)
To: Metzger, Markus T; +Cc: Jan Kratochvil, gdb-patches, Himpel, Christian
On Tue, May 14, 2013 at 8:26 AM, Metzger, Markus T
<markus.t.metzger@intel.com> wrote:
>> enum values are uppercased in GDB.
>
> It appears to me that most enumeration constants are lowercase. If you
> insist, I'll make mine UPPERCASE, but I find lowercase more readable.
UPPERCASE if I have a say in it. :-)
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH 15/15] record-btrace: extend unwinder
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (9 preceding siblings ...)
2013-05-02 12:04 ` [PATCH 02/15] btrace: change branch trace data structure Markus Metzger
@ 2013-05-02 12:04 ` Markus Metzger
2013-05-02 15:52 ` Eli Zaretskii
2013-05-02 12:04 ` [PATCH 13/15] record-btrace: add to_wait and to_resume target methods Markus Metzger
` (3 subsequent siblings)
14 siblings, 1 reply; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:04 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches, Eli Zaretskii
Extend the always failing unwinder to provide the PC based on the call structure
detected in the branch trace.
There are several open points:
An assertion in get_frame_id at frame.c:340 requires that a frame provides a
stack address. The record-btrace unwinder can't provide this since the trace
does not contain data. I incorrectly set stack_addr_p to 1 to avoid the
assertion.
When evaluating arguments for printing the stack back trace, there's an ugly
error displayed: "error reading variable: can't compute CFA for this frame".
The error is correct, we can't compute the CFA since we don't have the stack at
that time, but it is rather annoying at this place and makes the back trace
difficult to read.
Now that we set the PC to a different value and provide a fake unwinder, we have
the potential to affect almost every other command. How can this be tested
sufficiently? I added a few tests for the intended functionality, but nothing
so far to ensure that it does not break some other command when used in this
context.
CC: Eli Zaretskii <eliz@gnu.org>
2013-04-24 Markus Metzger <markus.t.metzger@intel.com>
* frame.h (enum frame_type) <BTRACE_FRAME>: New.
* record-btrace.c: Include hashtab.h.
(btrace_get_bfun_name): New.
(btrace_call_history): Call btrace_get_bfun_name.
(enum btrace_frame_flag): New.
(struct btrace_frame_cache): New.
(bfcache): New.
(bfcache_hash, bfcache_eq, bfcache_new): New.
(btrace_get_frame_function): New.
(record_btrace_frame_unwind_stop_reason): Allow unwinding.
(record_btrace_frame_this_id): Compute own id.
(record_btrace_frame_prev_register): Provide PC, throw_error
for all other registers.
(record_btrace_frame_sniffer): Detect btrace frames.
(record_btrace_frame_dealloc_cache): New.
(record_btrace_frame_unwind): Add new functions.
(_initialize_record_btrace): Allocate cache.
* btrace.c (btrace_clear): Call reinit_frame_cache.
* NEWS: Announce it.
testsuite/
* gdb.btrace/record_goto.exp: Add backtrace test.
* gdb.btrace/tailcall.exp: Add backtrace test.
---
gdb/NEWS | 2 +
gdb/btrace.c | 4 +
gdb/frame.h | 4 +-
gdb/record-btrace.c | 274 +++++++++++++++++++++++++++--
gdb/testsuite/gdb.btrace/record_goto.exp | 13 ++
gdb/testsuite/gdb.btrace/tailcall.exp | 17 ++
6 files changed, 294 insertions(+), 20 deletions(-)
diff --git a/gdb/NEWS b/gdb/NEWS
index ba17f7d..2a5287c 100644
--- a/gdb/NEWS
+++ b/gdb/NEWS
@@ -4,6 +4,8 @@
*** Changes since GDB 7.6
* The btrace record target supports the 'record goto' command.
+ For locations inside the execution trace, the back trace is computed
+ based on the information stored in the execution trace.
* The command 'record function-call-history' supports a new modifier '/c' to
indent the function names based on their call stack depth.
diff --git a/gdb/btrace.c b/gdb/btrace.c
index bb2e051..a2f8785 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -734,6 +734,10 @@ btrace_clear (struct thread_info *tp)
DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ /* Make sure btrace frames that may hold a pointer into the branch
+ trace data are destroyed. */
+ reinit_frame_cache ();
+
btinfo = &tp->btrace;
it = btinfo->begin;
diff --git a/gdb/frame.h b/gdb/frame.h
index 31b9cb7..db4cc52 100644
--- a/gdb/frame.h
+++ b/gdb/frame.h
@@ -216,7 +216,9 @@ enum frame_type
ARCH_FRAME,
/* Sentinel or registers frame. This frame obtains register values
direct from the inferior's registers. */
- SENTINEL_FRAME
+ SENTINEL_FRAME,
+ /* A branch tracing frame. */
+ BTRACE_FRAME
};
/* For every stopped thread, GDB tracks two frames: current and
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 388c2d7..8c95e76 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -34,6 +34,7 @@
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
+#include "hashtab.h"
/* The target_ops of record-btrace. */
static struct target_ops record_btrace_ops;
@@ -500,6 +501,25 @@ btrace_call_history_src_line (struct ui_out *uiout,
ui_out_field_int (uiout, "max line", end);
}
+/* Get the name of a branch trace function. */
+
+static const char *
+btrace_get_bfun_name (const struct btrace_function *bfun)
+{
+ struct minimal_symbol *msym;
+ struct symbol *sym;
+
+ msym = bfun->msym;
+ sym = bfun->sym;
+
+ if (sym != NULL)
+ return SYMBOL_PRINT_NAME (sym);
+ else if (msym != NULL)
+ return SYMBOL_PRINT_NAME (msym);
+ else
+ return "<unknown>";
+}
+
/* Disassemble a section of the recorded function trace. */
static void
@@ -515,12 +535,6 @@ btrace_call_history (struct ui_out *uiout,
for (bfun = begin; bfun != end; bfun = bfun->flow.next)
{
- struct minimal_symbol *msym;
- struct symbol *sym;
-
- msym = bfun->msym;
- sym = bfun->sym;
-
/* Print the function index. */
ui_out_field_uint (uiout, "index", bfun->number);
ui_out_text (uiout, "\t");
@@ -533,12 +547,7 @@ btrace_call_history (struct ui_out *uiout,
ui_out_text (uiout, " ");
}
- if (sym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
- else if (msym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
- else
- ui_out_field_string (uiout, "function", "<unknown>");
+ ui_out_field_string (uiout, "function", btrace_get_bfun_name (bfun));
if ((flags & record_print_insn_range) != 0)
{
@@ -942,13 +951,114 @@ record_btrace_prepare_to_store (struct target_ops *ops,
}
}
+/* A collection of branch trace frame flags. */
+
+enum btrace_frame_flag
+ {
+ /* The PC when unwinding into this frame points to the first insn. */
+ pc_is_first = 1 << 0,
+
+ /* The PC when unwinding into this frame points to the last insn. */
+ pc_is_last = 1 << 1,
+
+ /* The PC when unwinding into this frame points to the instruction
+ following the last insn. */
+ pc_is_next = 1 << 2
+ };
+
+/* The branch trace frame cache. */
+
+struct btrace_frame_cache
+{
+ /* The thread. */
+ struct thread_info *tp;
+
+ /* The frame info. */
+ struct frame_info *frame;
+
+ /* The branch trace function segment. */
+ struct btrace_function *bfun;
+
+ /* A bit-vector of btrace frame flags. */
+ unsigned int flags;
+};
+
+/* A struct btrace_frame_cache hash table indexed by NEXT. */
+
+static htab_t bfcache;
+
+/* hash_f for htab_create_alloc of bfcache. */
+
+static hashval_t
+bfcache_hash (const void *arg)
+{
+ const struct btrace_frame_cache *cache = arg;
+
+ return htab_hash_pointer (cache->frame);
+}
+
+/* eq_f for htab_create_alloc of bfcache. */
+
+static int
+bfcache_eq (const void *arg1, const void *arg2)
+{
+ const struct btrace_frame_cache *cache1 = arg1;
+ const struct btrace_frame_cache *cache2 = arg2;
+
+ return cache1->frame == cache2->frame;
+}
+
+/* Create a new btrace frame cache. */
+
+static struct btrace_frame_cache *
+bfcache_new (struct frame_info *frame)
+{
+ struct btrace_frame_cache *cache;
+ void **slot;
+
+ cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
+ cache->frame = frame;
+
+ slot = htab_find_slot (bfcache, cache, INSERT);
+ gdb_assert (*slot == NULL);
+ *slot = cache;
+
+ return cache;
+}
+
+/* Extract the branch trace function from a branch trace frame. */
+
+static struct btrace_function *
+btrace_get_frame_function (struct frame_info *frame)
+{
+ struct btrace_frame_cache *cache, pattern;
+ struct btrace_function *bfun;
+ void **slot;
+
+ pattern.frame = frame;
+
+ slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
+ if (slot == NULL)
+ return NULL;
+
+ cache = *slot;
+ return cache->bfun;
+}
+
/* Implement stop_reason method for record_btrace_frame_unwind. */
static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
void **this_cache)
{
- return UNWIND_UNAVAILABLE;
+ struct btrace_frame_cache *cache;
+
+ cache = *this_cache;
+
+ if (cache->bfun == NULL)
+ return UNWIND_UNAVAILABLE;
+
+ return UNWIND_NO_REASON;
}
/* Implement this_id method for record_btrace_frame_unwind. */
@@ -957,7 +1067,22 @@ static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
struct frame_id *this_id)
{
- /* Leave there the outer_frame_id value. */
+ struct btrace_frame_cache *cache;
+
+ cache = *this_cache;
+
+ memset (this_id, 0, sizeof (*this_id));
+
+ this_id->code_addr = get_frame_pc (this_frame);
+ this_id->code_addr_p = 1;
+
+ /* GDB requires frame id's to provide a stack address. */
+ this_id->stack_addr_p = 1;
+
+ /* We encode the btrace function pointer into the frame id to get
+ frame ids unique. */
+ this_id->special_addr = (CORE_ADDR) cache->bfun;
+ this_id->special_addr_p = 1;
}
/* Implement prev_register method for record_btrace_frame_unwind. */
@@ -967,8 +1092,46 @@ record_btrace_frame_prev_register (struct frame_info *this_frame,
void **this_cache,
int regnum)
{
- throw_error (NOT_AVAILABLE_ERROR,
- _("Registers are not available in btrace record history"));
+ struct btrace_frame_cache *cache;
+ struct btrace_function *bfun;
+ struct btrace_insn *insn;
+ struct gdbarch *gdbarch;
+ CORE_ADDR pc;
+ int pcreg;
+
+ gdbarch = get_frame_arch (this_frame);
+ pcreg = gdbarch_pc_regnum (gdbarch);
+ if (pcreg < 0 || regnum != pcreg)
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("Registers are not available in btrace record history"));
+
+ cache = *this_cache;
+ bfun = cache->bfun;
+ gdb_assert (bfun != NULL);
+
+ if (VEC_empty (btrace_insn_s, bfun->insn))
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("Registers are not available in btrace record history"));
+
+ if ((cache->flags & pc_is_first) != 0)
+ {
+ insn = VEC_index (btrace_insn_s, bfun->insn, 0);
+ pc = insn->pc;
+ }
+ else
+ {
+ insn = VEC_last (btrace_insn_s, bfun->insn);
+ pc = insn->pc;
+
+ if ((cache->flags & pc_is_next) != 0)
+ pc += gdb_insn_length (gdbarch, pc);
+ }
+
+ DEBUG ("[frame] unwound PC for %s on level %d: %s",
+ btrace_get_bfun_name (bfun), bfun->level,
+ core_addr_to_string_nz (pc));
+
+ return frame_unwind_got_address (this_frame, regnum, pc);
}
/* Implement sniffer method for record_btrace_frame_unwind. */
@@ -981,6 +1144,10 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
struct thread_info *tp;
struct btrace_thread_info *btinfo;
struct btrace_insn_iterator *replay;
+ struct btrace_frame_cache *cache;
+ struct btrace_function *bfun;
+ struct frame_info *next;
+ unsigned int flags;
/* This doesn't seem right. Yet, I don't see how I could get from a frame
to its thread. */
@@ -988,7 +1155,72 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
if (tp == NULL)
return 0;
- return btrace_is_replaying (tp);
+ replay = tp->btrace.replay;
+ if (replay == NULL)
+ return 0;
+
+ /* Find the next frame's branch trace function. */
+ flags = 0;
+ next = get_next_frame (this_frame);
+ if (next == NULL)
+ {
+ /* The sentinel frame below corresponds to our replay position. */
+ bfun = replay->function;
+ flags |= pc_is_last;
+ }
+ else
+ {
+ /* This is an outer frame. It must be the predecessor of another
+ branch trace frame. Let's get this frame's branch trace function
+ so we can compute our own. */
+ bfun = btrace_get_frame_function (next);
+
+ if (bfun != NULL)
+ {
+ if ((bfun->flags & bfun_up_links_to_ret) != 0)
+ flags |= pc_is_first;
+ else if ((bfun->flags & bfun_up_links_to_tailcall) != 0)
+ flags |= pc_is_last;
+ else
+ flags |= pc_is_next;
+ }
+ }
+
+ /* If we did not find a branch trace function, this is not our frame. */
+ if (bfun == NULL)
+ return 0;
+
+ /* Go up to the calling function segment. */
+ bfun = bfun->up;
+
+ if (bfun != NULL)
+ DEBUG ("[frame] sniffed frame for %s on level %d",
+ btrace_get_bfun_name (bfun), bfun->level);
+ else
+ DEBUG ("[frame] sniffed top btrace frame");
+
+ /* This is our frame. Initialize the frame cache. */
+ cache = bfcache_new (this_frame);
+ cache->tp = tp;
+ cache->bfun = bfun;
+ cache->flags = flags;
+
+ *this_cache = cache;
+ return 1;
+}
+
+static void
+record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
+{
+ struct btrace_frame_cache *cache;
+ void **slot;
+
+ cache = this_cache;
+
+ slot = htab_find_slot (bfcache, cache, NO_INSERT);
+ gdb_assert (slot != NULL);
+
+ htab_remove_elt (bfcache, cache);
}
/* btrace recording does not store previous memory content, neither the stack
@@ -999,12 +1231,13 @@ record_btrace_frame_sniffer (const struct frame_unwind *self,
static const struct frame_unwind record_btrace_frame_unwind =
{
- NORMAL_FRAME,
+ BTRACE_FRAME,
record_btrace_frame_unwind_stop_reason,
record_btrace_frame_this_id,
record_btrace_frame_prev_register,
NULL,
- record_btrace_frame_sniffer
+ record_btrace_frame_sniffer,
+ record_btrace_frame_dealloc_cache
};
/* The to_resume method of target record-btrace. */
@@ -1201,4 +1434,7 @@ _initialize_record_btrace (void)
init_record_btrace_ops ();
add_target (&record_btrace_ops);
+
+ bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
+ xcalloc, xfree);
}
diff --git a/gdb/testsuite/gdb.btrace/record_goto.exp b/gdb/testsuite/gdb.btrace/record_goto.exp
index 008d956..a045d9f 100644
--- a/gdb/testsuite/gdb.btrace/record_goto.exp
+++ b/gdb/testsuite/gdb.btrace/record_goto.exp
@@ -76,6 +76,19 @@ gdb_test "record instruction-history" "
gdb_test "record goto 25" "
.*fun3 \\(\\) at record_goto.c:35.*" "record_goto - goto 25"
+# check the back trace at that location
+gdb_test "backtrace" "
+#0.*fun3.*at record_goto.c:35.*\r
+#1.*fun4.*at record_goto.c:43.*\r
+#2.*main.*at record_goto.c:51.*\r
+Backtrace stopped: not enough registers or memory available to unwind further" "backtrace at 25"
+
+# walk the backtrace
+gdb_test "up" "
+.*fun4.*at record_goto.c:43.*" "up to fun4"
+gdb_test "up" "
+.*main.*at record_goto.c:51.*" "up to main"
+
# the function call history should start at the new location
gdb_test "record function-call-history /ci -" "
7\t fun3\tinst 18,20\r
diff --git a/gdb/testsuite/gdb.btrace/tailcall.exp b/gdb/testsuite/gdb.btrace/tailcall.exp
index 8e47a12..0b1bd61 100644
--- a/gdb/testsuite/gdb.btrace/tailcall.exp
+++ b/gdb/testsuite/gdb.btrace/tailcall.exp
@@ -47,3 +47,20 @@ gdb_test "record function-call-history /c 0" "
0\t foo\r
1\t bar\r
2\tmain" "tailcall - calls indented"
+
+# go into bar
+gdb_test "record goto 2" "
+.*bar \\(\\) at .*x86-tailcall.c:24.*" "go to bar"
+
+# check the backtrace
+gdb_test "backtrace" "
+#0.*bar.*at .*x86-tailcall.c:24.*\r
+#1.*foo.*at .*x86-tailcall.c:29.*\r
+#2.*main.*at .*x86-tailcall.c:37.*\r
+Backtrace stopped: not enough registers or memory available to unwind further" "backtrace in bar"
+
+# walk the backtrace
+gdb_test "up" "
+.*foo \\(\\) at .*x86-tailcall.c:29.*" "up to foo"
+gdb_test "up" "
+.*main \\(\\) at .*x86-tailcall.c:37.*" "up to main"
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* [PATCH 13/15] record-btrace: add to_wait and to_resume target methods.
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (10 preceding siblings ...)
2013-05-02 12:04 ` [PATCH 15/15] record-btrace: extend unwinder Markus Metzger
@ 2013-05-02 12:04 ` Markus Metzger
2013-05-02 12:04 ` [PATCH 04/15] btrace: increase buffer size Markus Metzger
` (2 subsequent siblings)
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:04 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
Add simple to_wait and to_resume target methods that prevent stepping when the
current replay position is not at the end of the execution log.
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* record-btrace.c (record_btrace_resume): New.
(record_btrace_wait): New.
(init_record_btrace_ops): Initialize to_wait and to_resume.
---
gdb/record-btrace.c | 43 +++++++++++++++++++++++++++++++++++++++++++
1 files changed, 43 insertions(+), 0 deletions(-)
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index cd17c77..a16e7ea 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -1006,6 +1006,47 @@ static const struct frame_unwind record_btrace_frame_unwind =
NULL,
record_btrace_frame_sniffer
};
+
+/* The to_resume method of target record-btrace. */
+
+static void
+record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
+ enum gdb_signal signal)
+{
+ struct thread_info *tp;
+ struct target_ops *t;
+
+ if (record_btrace_is_replaying ())
+ error (_("You can't do this from here. Do 'record goto end', first."));
+
+ for (t = ops->beneath; t != NULL; t = t->beneath)
+ if (t->to_resume != NULL)
+ break;
+
+ if (t == NULL)
+ error (_("Cannot find target for stepping."));
+
+ t->to_resume (t, ptid, step, signal);
+}
+
+/* The to_wait method of target record-btrace. */
+
+static ptid_t
+record_btrace_wait (struct target_ops *ops, ptid_t ptid,
+ struct target_waitstatus *status, int options)
+{
+ struct target_ops *t;
+
+ for (t = ops->beneath; t != NULL; t = t->beneath)
+ if (t->to_wait != NULL)
+ break;
+
+ if (t == NULL)
+ error (_("Cannot find target for stepping."));
+
+ return t->to_wait (t, ptid, status, options);
+}
+
/* Initialize the record-btrace target ops. */
static void
@@ -1038,6 +1079,8 @@ init_record_btrace_ops (void)
ops->to_store_registers = record_btrace_store_registers;
ops->to_prepare_to_store = record_btrace_prepare_to_store;
ops->to_get_unwinder = &record_btrace_frame_unwind;
+ ops->to_resume = record_btrace_resume;
+ ops->to_wait = record_btrace_wait;
ops->to_stratum = record_stratum;
ops->to_magic = OPS_MAGIC;
}
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* [PATCH 04/15] btrace: increase buffer size
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (11 preceding siblings ...)
2013-05-02 12:04 ` [PATCH 13/15] record-btrace: add to_wait and to_resume target methods Markus Metzger
@ 2013-05-02 12:04 ` Markus Metzger
2013-05-02 12:04 ` [PATCH 11/15] record-btrace, frame: supply target-specific unwinder Markus Metzger
2013-05-02 12:04 ` [PATCH 05/15] record-btrace: optionally indent function call history Markus Metzger
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:04 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
Try to allocate as much buffer as we can for each thread with a maximum
of 4MB.
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* common/linux-btrace.c (linux_enable_btrace): Increase buffer.
---
gdb/common/linux-btrace.c | 25 +++++++++++++++----------
1 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/gdb/common/linux-btrace.c b/gdb/common/linux-btrace.c
index ed0cb24..a0c4027 100644
--- a/gdb/common/linux-btrace.c
+++ b/gdb/common/linux-btrace.c
@@ -446,7 +446,7 @@ struct btrace_target_info *
linux_enable_btrace (ptid_t ptid)
{
struct btrace_target_info *tinfo;
- int pid;
+ int pid, pg;
tinfo = xzalloc (sizeof (*tinfo));
tinfo->ptid = ptid;
@@ -474,17 +474,22 @@ linux_enable_btrace (ptid_t ptid)
if (tinfo->file < 0)
goto err;
- /* We hard-code the trace buffer size.
- At some later time, we should make this configurable. */
- tinfo->size = 1;
- tinfo->buffer = mmap (NULL, perf_event_mmap_size (tinfo),
- PROT_READ, MAP_SHARED, tinfo->file, 0);
- if (tinfo->buffer == MAP_FAILED)
- goto err_file;
+ /* We try to allocate as much buffer as we can get.
+ We could allow the user to specify the size of the buffer, but then
+ we'd leave this search for the maximum buffer size to him. */
+ for (pg = 10; pg >= 0; --pg)
+ {
+ /* The number of pages we request needs to be a power of two. */
+ tinfo->size = 1 << pg;
+ tinfo->buffer = mmap (NULL, perf_event_mmap_size (tinfo),
+ PROT_READ, MAP_SHARED, tinfo->file, 0);
+ if (tinfo->buffer == MAP_FAILED)
+ continue;
- return tinfo;
+ return tinfo;
+ }
- err_file:
+ /* We were not able to allocate any buffer. */
close (tinfo->file);
err:
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* [PATCH 11/15] record-btrace, frame: supply target-specific unwinder
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (12 preceding siblings ...)
2013-05-02 12:04 ` [PATCH 04/15] btrace: increase buffer size Markus Metzger
@ 2013-05-02 12:04 ` Markus Metzger
2013-05-02 12:04 ` [PATCH 05/15] record-btrace: optionally indent function call history Markus Metzger
14 siblings, 0 replies; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:04 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches
Supply a target-specific frame unwinder for the record-btrace target that does
not allow unwinding while replaying.
2013-02-11 Jan Kratochvil <jan.kratochvil@redhat.com>
Markus Metzger <markus.t.metzger@intel.com>
gdb/
* record-btrace.c: Include frame-unwind.h.
(record_btrace_frame_unwind_stop_reason,
record_btrace_frame_this_id, record_btrace_frame_prev_register,
record_btrace_frame_sniffer, record_btrace_frame_unwind):
New.
(init_record_btrace_ops): Install it.
---
gdb/record-btrace.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 66 insertions(+), 0 deletions(-)
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index 2299899..20c61b7 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -33,6 +33,7 @@
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
+#include "frame-unwind.h"
/* The target_ops of record-btrace. */
static struct target_ops record_btrace_ops;
@@ -883,6 +884,70 @@ record_btrace_prepare_to_store (struct target_ops *ops,
}
}
+/* Implement stop_reason method for record_btrace_frame_unwind. */
+
+static enum unwind_stop_reason
+record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
+ void **this_cache)
+{
+ return UNWIND_UNAVAILABLE;
+}
+
+/* Implement this_id method for record_btrace_frame_unwind. */
+
+static void
+record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
+ struct frame_id *this_id)
+{
+ /* Leave there the outer_frame_id value. */
+}
+
+/* Implement prev_register method for record_btrace_frame_unwind. */
+
+static struct value *
+record_btrace_frame_prev_register (struct frame_info *this_frame,
+ void **this_cache,
+ int regnum)
+{
+ throw_error (NOT_AVAILABLE_ERROR,
+ _("Registers are not available in btrace record history"));
+}
+
+/* Implement sniffer method for record_btrace_frame_unwind. */
+
+static int
+record_btrace_frame_sniffer (const struct frame_unwind *self,
+ struct frame_info *this_frame,
+ void **this_cache)
+{
+ struct thread_info *tp;
+ struct btrace_thread_info *btinfo;
+ struct btrace_insn_iterator *replay;
+
+ /* This doesn't seem right. Yet, I don't see how I could get from a frame
+ to its thread. */
+ tp = find_thread_ptid (inferior_ptid);
+ if (tp == NULL)
+ return 0;
+
+ return btrace_is_replaying (tp);
+}
+
+/* btrace recording does not store previous memory content, neither the stack
+ frames content. Any unwinding would return erroneous results as the stack
+ contents no longer matches the changed PC value restored from history.
+ Therefore this unwinder reports any possibly unwound registers as
+ <unavailable>. */
+
+static const struct frame_unwind record_btrace_frame_unwind =
+{
+ NORMAL_FRAME,
+ record_btrace_frame_unwind_stop_reason,
+ record_btrace_frame_this_id,
+ record_btrace_frame_prev_register,
+ NULL,
+ record_btrace_frame_sniffer
+};
/* Initialize the record-btrace target ops. */
static void
@@ -913,6 +978,7 @@ init_record_btrace_ops (void)
ops->to_fetch_registers = record_btrace_fetch_registers;
ops->to_store_registers = record_btrace_store_registers;
ops->to_prepare_to_store = record_btrace_prepare_to_store;
+ ops->to_get_unwinder = &record_btrace_frame_unwind;
ops->to_stratum = record_stratum;
ops->to_magic = OPS_MAGIC;
}
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread* [PATCH 05/15] record-btrace: optionally indent function call history
2013-05-02 12:03 [PATCH 00/15] record-btrace: goto support Markus Metzger
` (13 preceding siblings ...)
2013-05-02 12:04 ` [PATCH 11/15] record-btrace, frame: supply target-specific unwinder Markus Metzger
@ 2013-05-02 12:04 ` Markus Metzger
2013-05-02 17:10 ` Eli Zaretskii
14 siblings, 1 reply; 24+ messages in thread
From: Markus Metzger @ 2013-05-02 12:04 UTC (permalink / raw)
To: jan.kratochvil; +Cc: gdb-patches, Eli Zaretskii, Christian Himpel
Add a new modifier /c to the "record function-call-history" command to
indent the function name based on its depth in the call stack.
Also reorder the optional fields to have the indentation at the very beginning.
Prefix the insn range (/i modifier) with "inst ".
Prefix the source line (/l modifier) with "at ".
Change the range syntax from "begin-end" to "begin,end" to allow copy&paste to
the "record instruction-history" and "list" commands.
Adjust the respective tests and add new tests for the /c modifier.
CC: Eli Zaretskii <eliz@gnu.org>
CC: Christian Himpel <christian.himpel@intel.com>
2013-05-02 Markus Metzger <markus.t.metzger@intel.com>
* record.h (enum record_print_flag)
<record_print_indent_calls>: New.
* record.c (get_call_history_modifiers): Recognize /c modifier.
(_initialize_record): Document /c modifier.
* record-btrace.c (btrace_call_history): Add btinfo parameter.
Reorder fields. Optionally indent the function name. Update
all users.
* NEWS: Announce changes.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected field
order for "record function-call-history".
Add new tests for "record function-call-history /c".
* gdb.btrace/exception.cc: New.
* gdb.btrace/exception.exp: New.
* gdb.btrace/tailcall.exp: New.
* gdb.btrace/x86-tailcall.S: New.
* gdb.btrace/x86-tailcall.c: New.
* gdb.btrace/unknown_functions.c: New.
* gdb.btrace/unknown_functions.exp: New.
* gdb.btrace/Makefile.in (EXECUTABLES): Add new.
doc/
* gdb.texinfo (Process Record and Replay): Document new /c
modifier accepted by "record function-call-history".
---
gdb/NEWS | 6 +
gdb/doc/gdb.texinfo | 12 +-
gdb/record-btrace.c | 33 ++-
gdb/record.c | 4 +
gdb/record.h | 3 +
gdb/testsuite/gdb.btrace/Makefile.in | 3 +-
gdb/testsuite/gdb.btrace/exception.cc | 56 ++++
gdb/testsuite/gdb.btrace/exception.exp | 64 +++++
gdb/testsuite/gdb.btrace/function_call_history.exp | 112 +++++++--
gdb/testsuite/gdb.btrace/tailcall.exp | 49 ++++
gdb/testsuite/gdb.btrace/unknown_functions.c | 45 ++++
gdb/testsuite/gdb.btrace/unknown_functions.exp | 58 +++++
gdb/testsuite/gdb.btrace/x86-tailcall.S | 269 ++++++++++++++++++++
gdb/testsuite/gdb.btrace/x86-tailcall.c | 39 +++
14 files changed, 715 insertions(+), 38 deletions(-)
create mode 100644 gdb/testsuite/gdb.btrace/exception.cc
create mode 100644 gdb/testsuite/gdb.btrace/exception.exp
create mode 100644 gdb/testsuite/gdb.btrace/tailcall.exp
create mode 100644 gdb/testsuite/gdb.btrace/unknown_functions.c
create mode 100644 gdb/testsuite/gdb.btrace/unknown_functions.exp
create mode 100644 gdb/testsuite/gdb.btrace/x86-tailcall.S
create mode 100644 gdb/testsuite/gdb.btrace/x86-tailcall.c
diff --git a/gdb/NEWS b/gdb/NEWS
index 76b48e8..2eeb59d 100644
--- a/gdb/NEWS
+++ b/gdb/NEWS
@@ -3,6 +3,12 @@
*** Changes since GDB 7.6
+* The command 'record function-call-history' supports a new modifier '/c' to
+ indent the function names based on their call stack depth.
+ The fields for the '/i' and '/l' modifier have been reordered.
+ The instruction range is now prefixed with 'insn'.
+ The source line range is now prefixed with 'at'.
+
* New commands:
catch rethrow
Like "catch throw", but catches a re-thrown exception.
diff --git a/gdb/doc/gdb.texinfo b/gdb/doc/gdb.texinfo
index 47b1188..99af587 100644
--- a/gdb/doc/gdb.texinfo
+++ b/gdb/doc/gdb.texinfo
@@ -6370,7 +6370,9 @@ line for each sequence of instructions that belong to the same
function giving the name of that function, the source lines
for this instruction sequence (if the @code{/l} modifier is
specified), and the instructions numbers that form the sequence (if
-the @code{/i} modifier is specified).
+the @code{/i} modifier is specified). The function names are indented
+to reflect the call stack depth if the @code{/c} modifier is
+specified.
@smallexample
(@value{GDBP}) @b{list 1, 10}
@@ -6384,10 +6386,10 @@ the @code{/i} modifier is specified).
8 foo ();
9 ...
10 @}
-(@value{GDBP}) @b{record function-call-history /l}
-1 foo.c:6-8 bar
-2 foo.c:2-3 foo
-3 foo.c:9-10 bar
+(@value{GDBP}) @b{record function-call-history /lc}
+1 bar at foo.c:6,8
+2 foo at foo.c:2,3
+3 bar at foo.c:9,10
@end smallexample
By default, ten lines are printed. This can be changed using the
diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c
index d1c9293..9d73286 100644
--- a/gdb/record-btrace.c
+++ b/gdb/record-btrace.c
@@ -428,7 +428,7 @@ btrace_call_history_insn_range (struct ui_out *uiout,
end = begin + size - 1;
ui_out_field_uint (uiout, "insn begin", begin);
- ui_out_text (uiout, "-");
+ ui_out_text (uiout, ",");
ui_out_field_uint (uiout, "insn end", end);
}
@@ -460,7 +460,7 @@ btrace_call_history_src_line (struct ui_out *uiout,
if (end == begin)
return;
- ui_out_text (uiout, "-");
+ ui_out_text (uiout, ",");
ui_out_field_int (uiout, "max line", end);
}
@@ -468,6 +468,7 @@ btrace_call_history_src_line (struct ui_out *uiout,
static void
btrace_call_history (struct ui_out *uiout,
+ const struct btrace_thread_info *btinfo,
const struct btrace_function *begin,
const struct btrace_function *end,
enum record_print_flag flags)
@@ -488,23 +489,33 @@ btrace_call_history (struct ui_out *uiout,
ui_out_field_uint (uiout, "index", bfun->number);
ui_out_text (uiout, "\t");
+ if ((flags & record_print_indent_calls) != 0)
+ {
+ int level = bfun->level + btinfo->level, i;
+
+ for (i = 0; i < level; ++i)
+ ui_out_text (uiout, " ");
+ }
+
+ if (sym != NULL)
+ ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
+ else if (msym != NULL)
+ ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
+ else
+ ui_out_field_string (uiout, "function", "<unknown>");
+
if ((flags & record_print_insn_range) != 0)
{
+ ui_out_text (uiout, "\tinst ");
btrace_call_history_insn_range (uiout, bfun);
- ui_out_text (uiout, "\t");
}
if ((flags & record_print_src_line) != 0)
{
+ ui_out_text (uiout, "\tat ");
btrace_call_history_src_line (uiout, bfun);
- ui_out_text (uiout, "\t");
}
- if (sym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
- else if (msym != NULL)
- ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
-
ui_out_text (uiout, "\n");
}
}
@@ -615,7 +626,7 @@ record_btrace_call_history (int size, int flags)
}
if (covered > 0)
- btrace_call_history (uiout, begin, end, flags);
+ btrace_call_history (uiout, btinfo, begin, end, flags);
else
{
if (size < 0)
@@ -666,7 +677,7 @@ record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
if (end == NULL)
end = btinfo->end;
- btrace_call_history (uiout, begin, end, flags);
+ btrace_call_history (uiout, btinfo, begin, end, flags);
btrace_set_call_history (btinfo, begin, end);
do_cleanups (uiout_cleanup);
diff --git a/gdb/record.c b/gdb/record.c
index 76d9fd2..b48b138 100644
--- a/gdb/record.c
+++ b/gdb/record.c
@@ -558,6 +558,9 @@ get_call_history_modifiers (char **arg)
case 'i':
modifiers |= record_print_insn_range;
break;
+ case 'c':
+ modifiers |= record_print_indent_calls;
+ break;
default:
error (_("Invalid modifier: %c."), *args);
}
@@ -783,6 +786,7 @@ function.\n\
Without modifiers, it prints the function name.\n\
With a /l modifier, the source file and line number range is included.\n\
With a /i modifier, the instruction number range is included.\n\
+With a /c modifier, the output is indented based on the call stack depth.\n\
With no argument, prints ten more lines after the previous ten-line print.\n\
\"record function-call-history -\" prints ten lines before a previous ten-line \
print.\n\
diff --git a/gdb/record.h b/gdb/record.h
index 86e6bc6..0ca8753 100644
--- a/gdb/record.h
+++ b/gdb/record.h
@@ -40,6 +40,9 @@ enum record_print_flag
/* Print the instruction number range (if applicable). */
record_print_insn_range = (1 << 1),
+
+ /* Indent based on call stack depth (if applicable). */
+ record_print_indent_calls = (1 << 2)
};
/* Wrapper for target_read_memory that prints a debug message if
diff --git a/gdb/testsuite/gdb.btrace/Makefile.in b/gdb/testsuite/gdb.btrace/Makefile.in
index f4c06d1..5c70700 100644
--- a/gdb/testsuite/gdb.btrace/Makefile.in
+++ b/gdb/testsuite/gdb.btrace/Makefile.in
@@ -1,7 +1,8 @@
VPATH = @srcdir@
srcdir = @srcdir@
-EXECUTABLES = enable function_call_history instruction_history
+EXECUTABLES = enable function_call_history instruction_history tailcall \
+ exception
MISCELLANEOUS =
diff --git a/gdb/testsuite/gdb.btrace/exception.cc b/gdb/testsuite/gdb.btrace/exception.cc
new file mode 100644
index 0000000..029a4bc
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/exception.cc
@@ -0,0 +1,56 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+ Copyright 2013 Free Software Foundation, Inc.
+
+ Contributed by Intel Corp. <markus.t.metzger@intel.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+static void
+bad (void)
+{
+ throw 42;
+}
+
+static void
+bar (void)
+{
+ bad ();
+}
+
+static void
+foo (void)
+{
+ bar ();
+}
+
+static void
+test (void)
+{
+ try
+ {
+ foo ();
+ }
+ catch (...)
+ {
+ }
+}
+
+int
+main (void)
+{
+ test ();
+ test (); /* bp.1 */
+ return 0; /* bp.2 */
+}
diff --git a/gdb/testsuite/gdb.btrace/exception.exp b/gdb/testsuite/gdb.btrace/exception.exp
new file mode 100644
index 0000000..9a12d95
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/exception.exp
@@ -0,0 +1,64 @@
+# This testcase is part of GDB, the GNU debugger.
+#
+# Copyright 2013 Free Software Foundation, Inc.
+#
+# Contributed by Intel Corp. <markus.t.metzger@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# check for btrace support
+if { [skip_btrace_tests] } { return -1 }
+
+# start inferior
+standard_testfile exception.cc
+if [prepare_for_testing $testfile.exp $testfile $srcfile {c++ debug}] {
+ return -1
+}
+if ![runto_main] {
+ return -1
+}
+
+# we want to see the full trace for this test
+gdb_test_no_output "set record function-call-history-size 0"
+
+# set bp
+set bp_1 [gdb_get_line_number "bp.1" $srcfile]
+set bp_2 [gdb_get_line_number "bp.2" $srcfile]
+gdb_breakpoint $bp_1
+gdb_breakpoint $bp_2
+
+# trace the code between the two breakpoints
+gdb_continue_to_breakpoint "cont to $bp_1" ".*$srcfile:$bp_1.*"
+gdb_test_no_output "record btrace"
+gdb_continue_to_breakpoint "cont to $bp_2" ".*$srcfile:$bp_2.*"
+
+# show the flat branch trace
+gdb_test "record function-call-history 0" "
+0\ttest\\(\\)\r
+1\tfoo\\(\\)\r
+2\tbar\\(\\)\r
+3\tbad\\(\\)\r
+.*\r
+\[0-9\]*\ttest\\(\\)\r
+\[0-9\]*\tmain\\(\\)" "exception - flat"
+
+# show the branch trace with calls indented
+gdb_test "record function-call-history /c 0" "
+0\t test\\(\\)\r
+1\t foo\\(\\)\r
+2\t bar\\(\\)\r
+3\t bad\\(\\)\r
+.*\r
+\[0-9\]*\t test\\(\\)\r
+\[0-9\]*\tmain\\(\\)" "exception - calls indented"
diff --git a/gdb/testsuite/gdb.btrace/function_call_history.exp b/gdb/testsuite/gdb.btrace/function_call_history.exp
index 7658637..e528625 100644
--- a/gdb/testsuite/gdb.btrace/function_call_history.exp
+++ b/gdb/testsuite/gdb.btrace/function_call_history.exp
@@ -62,6 +62,30 @@ gdb_test "record function-call-history" "
19\tinc\r
20\tmain\r" "record function-call-history - with size unlimited"
+# show indented function call history with unlimited size
+gdb_test "record function-call-history /c 0" "
+0\tmain\r
+1\t inc\r
+2\tmain\r
+3\t inc\r
+4\tmain\r
+5\t inc\r
+6\tmain\r
+7\t inc\r
+8\tmain\r
+9\t inc\r
+10\tmain\r
+11\t inc\r
+12\tmain\r
+13\t inc\r
+14\tmain\r
+15\t inc\r
+16\tmain\r
+17\t inc\r
+18\tmain\r
+19\t inc\r
+20\tmain\r" "indented record function-call-history - with size unlimited"
+
# show function call history with size of 21, we expect to see all 21 entries
gdb_test_no_output "set record function-call-history-size 21"
# show function call history
@@ -155,32 +179,35 @@ gdb_test "record function-call-history -" "At the start of the branch trace reco
# make sure we cannot move any further back
gdb_test "record function-call-history -" "At the start of the branch trace record\\." "record function-call-history - at the start (2)"
+# don't mess around with path names
+gdb_test_no_output "set filename-display basename"
+
# moving forward again, but this time with file and line number, expected to see the first 15 entries
gdb_test "record function-call-history /l +" "
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r" "record function-call-history /l - show first 15 entries"
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r" "record function-call-history /l - show first 15 entries"
# moving forward and expect to see the latest 6 entries
gdb_test "record function-call-history /l +" "
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-41\tmain\r
-.*$srcfile:22-24\tinc\r
-.*$srcfile:40-43\tmain\r" "record function-call-history /l - show last 6 entries"
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,41\r
+\[0-9\]*\tinc\tat $srcfile:22,24\r
+\[0-9\]*\tmain\tat $srcfile:40,43\r" "record function-call-history /l - show last 6 entries"
# moving further forward shouldn't work
gdb_test "record function-call-history /l +" "At the end of the branch trace record\\." "record function-call-history /l - at the end (1)"
@@ -219,3 +246,46 @@ gdb_test "record function-call-history" "
28\tfib\r
29\tfib\r
30\tmain" "show recursive function call history"
+
+# show indented function call history for fib
+gdb_test "record function-call-history /c 20, +11" "
+20\tmain\r
+21\t fib\r
+22\t fib\r
+23\t fib\r
+24\t fib\r
+25\t fib\r
+26\t fib\r
+27\t fib\r
+28\t fib\r
+29\t fib\r
+30\tmain\r" "indented record function-call-history - fib"
+
+# make sure we can handle incomplete trace with respect to indentation
+if ![runto_main] {
+ return -1
+}
+# navigate to the fib in line 24 above
+gdb_breakpoint fib
+gdb_continue_to_breakpoint "cont to fib.1"
+gdb_continue_to_breakpoint "cont to fib.2"
+gdb_continue_to_breakpoint "cont to fib.3"
+gdb_continue_to_breakpoint "cont to fib.4"
+
+# start tracing
+gdb_test_no_output "record btrace"
+
+# continue until line 30 above
+delete_breakpoints
+set bp_location [gdb_get_line_number "bp.2" $testfile.c]
+gdb_breakpoint $bp_location
+gdb_continue_to_breakpoint "cont to $bp_location" ".*$testfile.c:$bp_location.*"
+
+# let's look at the trace. we expect to see the tail of the above listing.
+gdb_test "record function-call-history /c" "
+0\t fib\r
+1\t fib\r
+2\t fib\r
+3\t fib\r
+4\t fib\r
+5\tmain\r" "indented record function-call-history - fib (incomplete trace)"
diff --git a/gdb/testsuite/gdb.btrace/tailcall.exp b/gdb/testsuite/gdb.btrace/tailcall.exp
new file mode 100644
index 0000000..8e47a12
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/tailcall.exp
@@ -0,0 +1,49 @@
+# This testcase is part of GDB, the GNU debugger.
+#
+# Copyright 2013 Free Software Foundation, Inc.
+#
+# Contributed by Intel Corp. <markus.t.metzger@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# check for btrace support
+if { [skip_btrace_tests] } { return -1 }
+
+# start inferior
+standard_testfile x86-tailcall.S
+if [prepare_for_testing tailcall.exp $testfile $srcfile {c++ debug}] {
+ return -1
+}
+if ![runto_main] {
+ return -1
+}
+
+# we want to see the full trace for this test
+gdb_test_no_output "set record function-call-history-size 0"
+
+# trace the call to foo
+gdb_test_no_output "record btrace"
+gdb_test "next"
+
+# show the flat branch trace
+gdb_test "record function-call-history 0" "
+0\tfoo\r
+1\tbar\r
+2\tmain" "tailcall - flat"
+
+# show the branch trace with calls indented
+gdb_test "record function-call-history /c 0" "
+0\t foo\r
+1\t bar\r
+2\tmain" "tailcall - calls indented"
diff --git a/gdb/testsuite/gdb.btrace/unknown_functions.c b/gdb/testsuite/gdb.btrace/unknown_functions.c
new file mode 100644
index 0000000..178c3e9
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/unknown_functions.c
@@ -0,0 +1,45 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+ Copyright 2013 Free Software Foundation, Inc.
+
+ Contributed by Intel Corp. <markus.t.metzger@intel.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+static int foo (void);
+
+int test (void)
+{
+ return foo ();
+}
+
+static int
+bar (void)
+{
+ return 42;
+}
+
+static int
+foo (void)
+{
+ return bar ();
+}
+
+int
+main (void)
+{
+ test ();
+ test ();
+ return 0;
+}
diff --git a/gdb/testsuite/gdb.btrace/unknown_functions.exp b/gdb/testsuite/gdb.btrace/unknown_functions.exp
new file mode 100644
index 0000000..fb0a791
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/unknown_functions.exp
@@ -0,0 +1,58 @@
+# This testcase is part of GDB, the GNU debugger.
+#
+# Copyright 2013 Free Software Foundation, Inc.
+#
+# Contributed by Intel Corp. <markus.t.metzger@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# check for btrace support
+if { [skip_btrace_tests] } { return -1 }
+
+# start inferior
+standard_testfile
+
+# discard local symbols
+set ldflags "additional_flags=-Wl,-x"
+if [prepare_for_testing $testfile.exp $testfile $srcfile $ldflags] {
+ return -1
+}
+if ![runto test] {
+ return -1
+}
+
+# we want to see the full trace for this test
+gdb_test_no_output "set record function-call-history-size 0"
+
+# trace from one call of test to the next
+gdb_test_no_output "record btrace"
+gdb_continue_to_breakpoint "cont to test" ".*test.*"
+
+# show the flat branch trace
+gdb_test "record function-call-history 0" "
+0\t<unknown>\r
+1\t<unknown>\r
+2\t<unknown>\r
+3\ttest\r
+4\tmain\r
+5\ttest" "unknown - flat"
+
+# show the branch trace with calls indented
+gdb_test "record function-call-history /c 0" "
+0\t <unknown>\r
+1\t <unknown>\r
+2\t <unknown>\r
+3\t test\r
+4\tmain\r
+5\t test" "unknown - calls indented"
diff --git a/gdb/testsuite/gdb.btrace/x86-tailcall.S b/gdb/testsuite/gdb.btrace/x86-tailcall.S
new file mode 100644
index 0000000..5a4fede
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/x86-tailcall.S
@@ -0,0 +1,269 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+ Copyright 2013 Free Software Foundation, Inc.
+
+ Contributed by Intel Corp. <markus.t.metzger@intel.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+ This file has been generated using:
+ gcc -S -O2 -g x86-tailcall.c -o x86-tailcall.S */
+
+ .file "x86-tailcall.c"
+ .section .debug_abbrev,"",@progbits
+.Ldebug_abbrev0:
+ .section .debug_info,"",@progbits
+.Ldebug_info0:
+ .section .debug_line,"",@progbits
+.Ldebug_line0:
+ .text
+.Ltext0:
+ .p2align 4,,15
+ .type bar, @function
+bar:
+.LFB0:
+ .file 1 "gdb/testsuite/gdb.btrace/x86-tailcall.c"
+ .loc 1 22 0
+ .cfi_startproc
+ .loc 1 24 0
+ movl $42, %eax
+ ret
+ .cfi_endproc
+.LFE0:
+ .size bar, .-bar
+ .p2align 4,,15
+ .type foo, @function
+foo:
+.LFB1:
+ .loc 1 28 0
+ .cfi_startproc
+ .loc 1 29 0
+ jmp bar
+ .cfi_endproc
+.LFE1:
+ .size foo, .-foo
+ .p2align 4,,15
+.globl main
+ .type main, @function
+main:
+.LFB2:
+ .loc 1 34 0
+ .cfi_startproc
+ .loc 1 37 0
+ call foo
+.LVL0:
+ addl $1, %eax
+.LVL1:
+ .loc 1 39 0
+ ret
+ .cfi_endproc
+.LFE2:
+ .size main, .-main
+.Letext0:
+ .section .debug_loc,"",@progbits
+.Ldebug_loc0:
+.LLST0:
+ .quad .LVL0-.Ltext0
+ .quad .LVL1-.Ltext0
+ .value 0x3
+ .byte 0x70
+ .sleb128 1
+ .byte 0x9f
+ .quad .LVL1-.Ltext0
+ .quad .LFE2-.Ltext0
+ .value 0x1
+ .byte 0x50
+ .quad 0x0
+ .quad 0x0
+ .section .debug_info
+ .long 0x9c
+ .value 0x3
+ .long .Ldebug_abbrev0
+ .byte 0x8
+ .uleb128 0x1
+ .long .LASF0
+ .byte 0x1
+ .long .LASF1
+ .long .LASF2
+ .quad .Ltext0
+ .quad .Letext0
+ .long .Ldebug_line0
+ .uleb128 0x2
+ .string "bar"
+ .byte 0x1
+ .byte 0x15
+ .byte 0x1
+ .long 0x4b
+ .quad .LFB0
+ .quad .LFE0
+ .byte 0x1
+ .byte 0x9c
+ .uleb128 0x3
+ .byte 0x4
+ .byte 0x5
+ .string "int"
+ .uleb128 0x2
+ .string "foo"
+ .byte 0x1
+ .byte 0x1b
+ .byte 0x1
+ .long 0x4b
+ .quad .LFB1
+ .quad .LFE1
+ .byte 0x1
+ .byte 0x9c
+ .uleb128 0x4
+ .byte 0x1
+ .long .LASF3
+ .byte 0x1
+ .byte 0x21
+ .byte 0x1
+ .long 0x4b
+ .quad .LFB2
+ .quad .LFE2
+ .byte 0x1
+ .byte 0x9c
+ .uleb128 0x5
+ .long .LASF4
+ .byte 0x1
+ .byte 0x23
+ .long 0x4b
+ .long .LLST0
+ .byte 0x0
+ .byte 0x0
+ .section .debug_abbrev
+ .uleb128 0x1
+ .uleb128 0x11
+ .byte 0x1
+ .uleb128 0x25
+ .uleb128 0xe
+ .uleb128 0x13
+ .uleb128 0xb
+ .uleb128 0x3
+ .uleb128 0xe
+ .uleb128 0x1b
+ .uleb128 0xe
+ .uleb128 0x11
+ .uleb128 0x1
+ .uleb128 0x12
+ .uleb128 0x1
+ .uleb128 0x10
+ .uleb128 0x6
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x2
+ .uleb128 0x2e
+ .byte 0x0
+ .uleb128 0x3
+ .uleb128 0x8
+ .uleb128 0x3a
+ .uleb128 0xb
+ .uleb128 0x3b
+ .uleb128 0xb
+ .uleb128 0x27
+ .uleb128 0xc
+ .uleb128 0x49
+ .uleb128 0x13
+ .uleb128 0x11
+ .uleb128 0x1
+ .uleb128 0x12
+ .uleb128 0x1
+ .uleb128 0x40
+ .uleb128 0xa
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x3
+ .uleb128 0x24
+ .byte 0x0
+ .uleb128 0xb
+ .uleb128 0xb
+ .uleb128 0x3e
+ .uleb128 0xb
+ .uleb128 0x3
+ .uleb128 0x8
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x4
+ .uleb128 0x2e
+ .byte 0x1
+ .uleb128 0x3f
+ .uleb128 0xc
+ .uleb128 0x3
+ .uleb128 0xe
+ .uleb128 0x3a
+ .uleb128 0xb
+ .uleb128 0x3b
+ .uleb128 0xb
+ .uleb128 0x27
+ .uleb128 0xc
+ .uleb128 0x49
+ .uleb128 0x13
+ .uleb128 0x11
+ .uleb128 0x1
+ .uleb128 0x12
+ .uleb128 0x1
+ .uleb128 0x40
+ .uleb128 0xa
+ .byte 0x0
+ .byte 0x0
+ .uleb128 0x5
+ .uleb128 0x34
+ .byte 0x0
+ .uleb128 0x3
+ .uleb128 0xe
+ .uleb128 0x3a
+ .uleb128 0xb
+ .uleb128 0x3b
+ .uleb128 0xb
+ .uleb128 0x49
+ .uleb128 0x13
+ .uleb128 0x2
+ .uleb128 0x6
+ .byte 0x0
+ .byte 0x0
+ .byte 0x0
+ .section .debug_pubnames,"",@progbits
+ .long 0x17
+ .value 0x2
+ .long .Ldebug_info0
+ .long 0xa0
+ .long 0x70
+ .string "main"
+ .long 0x0
+ .section .debug_aranges,"",@progbits
+ .long 0x2c
+ .value 0x2
+ .long .Ldebug_info0
+ .byte 0x8
+ .byte 0x0
+ .value 0x0
+ .value 0x0
+ .quad .Ltext0
+ .quad .Letext0-.Ltext0
+ .quad 0x0
+ .quad 0x0
+ .section .debug_str,"MS",@progbits,1
+.LASF1:
+ .string "gdb/testsuite/gdb.btrace/x86-tailcall.c"
+.LASF4:
+ .string "answer"
+.LASF0:
+ .string "GNU C 4.4.4 20100726 (Red Hat 4.4.4-13)"
+.LASF3:
+ .string "main"
+.LASF2:
+ .string "/users/mmetzger/gdb/gerrit/git"
+ .ident "GCC: (GNU) 4.4.4 20100726 (Red Hat 4.4.4-13)"
+ .section .note.GNU-stack,"",@progbits
diff --git a/gdb/testsuite/gdb.btrace/x86-tailcall.c b/gdb/testsuite/gdb.btrace/x86-tailcall.c
new file mode 100644
index 0000000..9e3b183
--- /dev/null
+++ b/gdb/testsuite/gdb.btrace/x86-tailcall.c
@@ -0,0 +1,39 @@
+/* This testcase is part of GDB, the GNU debugger.
+
+ Copyright 2013 Free Software Foundation, Inc.
+
+ Contributed by Intel Corp. <markus.t.metzger@intel.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+static __attribute__ ((noinline)) int
+bar (void)
+{
+ return 42;
+}
+
+static __attribute__ ((noinline)) int
+foo (void)
+{
+ return bar ();
+}
+
+int
+main (void)
+{
+ int answer;
+
+ answer = foo ();
+ return ++answer;
+}
--
1.7.1
^ permalink raw reply [flat|nested] 24+ messages in thread

* Re: [PATCH 05/15] record-btrace: optionally indent function call history
2013-05-02 12:04 ` [PATCH 05/15] record-btrace: optionally indent function call history Markus Metzger
@ 2013-05-02 17:10 ` Eli Zaretskii
0 siblings, 0 replies; 24+ messages in thread
From: Eli Zaretskii @ 2013-05-02 17:10 UTC (permalink / raw)
To: Markus Metzger; +Cc: jan.kratochvil, gdb-patches, christian.himpel
> From: Markus Metzger <markus.t.metzger@intel.com>
> Cc: gdb-patches@sourceware.org, Eli Zaretskii <eliz@gnu.org>,
> Christian Himpel <christian.himpel@intel.com>
> Date: Thu, 2 May 2013 14:03:26 +0200
>
> Add a new modifier /c to the "record function-call-history" command to
> indent the function name based on its depth in the call stack.
>
> Also reorder the optional fields to have the indentation at the very beginning.
> Prefix the insn range (/i modifier) with "inst ".
> Prefix the source line (/l modifier) with "at ".
> Change the range syntax from "begin-end" to "begin,end" to allow copy&paste to
> the "record instruction-history" and "list" commands.
>
> Adjust the respective tests and add new tests for the /c modifier.
OK for the documentation parts. Thanks.
^ permalink raw reply [flat|nested] 24+ messages in thread