From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: (qmail 107461 invoked by alias); 10 May 2017 11:48:32 -0000 Mailing-List: contact gdb-patches-help@sourceware.org; run by ezmlm Precedence: bulk List-Id: List-Subscribe: List-Archive: List-Post: List-Help: , Sender: gdb-patches-owner@sourceware.org Received: (qmail 99908 invoked by uid 89); 10 May 2017 11:48:17 -0000 Authentication-Results: sourceware.org; auth=none X-Virus-Found: No X-Spam-SWARE-Status: No, score=-24.7 required=5.0 tests=AWL,BAYES_00,GIT_PATCH_0,GIT_PATCH_1,GIT_PATCH_2,GIT_PATCH_3,KAM_LAZY_DOMAIN_SECURITY,RP_MATCHES_RCVD autolearn=ham version=3.3.2 spammy=19107 X-HELO: mga09.intel.com Received: from mga09.intel.com (HELO mga09.intel.com) (134.134.136.24) by sourceware.org (qpsmtpd/0.93/v0.84-503-g423c35a) with ESMTP; Wed, 10 May 2017 11:48:00 +0000 Received: from fmsmga005.fm.intel.com ([10.253.24.32]) by orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 10 May 2017 04:47:59 -0700 X-ExtLoop1: 1 Received: from irvmail001.ir.intel.com ([163.33.26.43]) by fmsmga005.fm.intel.com with ESMTP; 10 May 2017 04:47:58 -0700 Received: from ulvlx001.iul.intel.com (ulvlx001.iul.intel.com [172.28.207.17]) by irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id v4ABlvBS018160; Wed, 10 May 2017 12:47:57 +0100 Received: from ulvlx001.iul.intel.com (localhost [127.0.0.1]) by ulvlx001.iul.intel.com with ESMTP id v4ABlvko019932; Wed, 10 May 2017 13:47:57 +0200 Received: (from twiederh@localhost) by ulvlx001.iul.intel.com with œ id v4ABlvuw019928; Wed, 10 May 2017 13:47:57 +0200 From: Tim Wiederhake To: gdb-patches@sourceware.org Cc: markus.t.metzger@intel.com, simon.marchi@polymtl.ca Subject: [PATCH v4 12/12] btrace: Store function segments as objects. 
Date: Wed, 10 May 2017 11:48:00 -0000 Message-Id: <1494416867-19612-13-git-send-email-tim.wiederhake@intel.com> In-Reply-To: <1494416867-19612-1-git-send-email-tim.wiederhake@intel.com> References: <1494416867-19612-1-git-send-email-tim.wiederhake@intel.com> X-IsSubscribed: yes X-SW-Source: 2017-05/txt/msg00257.txt.bz2 2017-05-10 Tim Wiederhake gdb/ChangeLog: * btrace.c (ftrace_find_call_by_number): New function. (ftrace_new_function): Store objects, not pointers. (ftrace_find_call_by_number, ftrace_new_return, ftrace_new_switch, ftrace_new_gap, ftrace_update_function, ftrace_compute_global_level_offset, btrace_stitch_bts, btrace_clear, btrace_insn_get, btrace_insn_get_error, btrace_insn_end, btrace_insn_next, btrace_insn_prev, btrace_find_insn_by_number, btrace_ends_with_single_insn, btrace_call_get): Account for btrace_thread_info::functions now storing objects. * btrace.h (struct btrace_thread_info) : Make std::vector. * record-btrace.c (record_btrace_frame_this_id, record_btrace_frame_prev_register, record_btrace_frame_sniffer): Account for btrace_thread_info::functions now storing objects. --- gdb/btrace.c | 89 ++++++++++++++++++++++++++--------------------------- gdb/btrace.h | 7 +++-- gdb/record-btrace.c | 10 +++--- 3 files changed, 53 insertions(+), 53 deletions(-) diff --git a/gdb/btrace.c b/gdb/btrace.c index 9278008..75f6ca1 100644 --- a/gdb/btrace.c +++ b/gdb/btrace.c @@ -156,13 +156,25 @@ ftrace_call_num_insn (const struct btrace_function* bfun) exists. BTINFO is the branch trace information for the current thread. */ static struct btrace_function * +ftrace_find_call_by_number (struct btrace_thread_info *btinfo, + unsigned int number) +{ + if (number == 0 || number > btinfo->functions.size ()) + return NULL; + + return &btinfo->functions[number - 1]; +} + +/* A const version of the function above. 
*/ + +static const struct btrace_function * ftrace_find_call_by_number (const struct btrace_thread_info *btinfo, unsigned int number) { if (number == 0 || number > btinfo->functions.size ()) return NULL; - return btinfo->functions[number - 1]; + return &btinfo->functions[number - 1]; } /* Return non-zero if BFUN does not match MFUN and FUN, @@ -214,37 +226,33 @@ ftrace_function_switched (const struct btrace_function *bfun, /* Allocate and initialize a new branch trace function segment at the end of the trace. BTINFO is the branch trace information for the current thread. - MFUN and FUN are the symbol information we have for this function. */ + MFUN and FUN are the symbol information we have for this function. + This invalidates all struct btrace_function pointer currently held. */ static struct btrace_function * ftrace_new_function (struct btrace_thread_info *btinfo, struct minimal_symbol *mfun, struct symbol *fun) { - struct btrace_function *bfun; - - bfun = XCNEW (struct btrace_function); - - bfun->msym = mfun; - bfun->sym = fun; + struct btrace_function bfun {mfun, fun, 0, 0, 0, NULL, 0, 0, 0, 0, 0}; if (btinfo->functions.empty ()) { /* Start counting at one. */ - bfun->number = 1; - bfun->insn_offset = 1; + bfun.number = 1; + bfun.insn_offset = 1; } else { - struct btrace_function *prev = btinfo->functions.back (); + struct btrace_function *prev = &btinfo->functions.back (); - bfun->number = prev->number + 1; - bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev); - bfun->level = prev->level; + bfun.number = prev->number + 1; + bfun.insn_offset = prev->insn_offset + ftrace_call_num_insn (prev); + bfun.level = prev->level; } btinfo->functions.push_back (bfun); - return bfun; + return &btinfo->functions.back (); } /* Update the UP field of a function segment. 
*/ @@ -406,10 +414,10 @@ ftrace_new_return (struct btrace_thread_info *btinfo, struct minimal_symbol *mfun, struct symbol *fun) { - struct btrace_function *prev = btinfo->functions.back (); - struct btrace_function *bfun, *caller; + struct btrace_function *prev, *bfun, *caller; bfun = ftrace_new_function (btinfo, mfun, fun); + prev = ftrace_find_call_by_number (btinfo, bfun->number - 1); /* It is important to start at PREV's caller. Otherwise, we might find PREV itself, if PREV is a recursive function. */ @@ -488,12 +496,12 @@ ftrace_new_switch (struct btrace_thread_info *btinfo, struct minimal_symbol *mfun, struct symbol *fun) { - struct btrace_function *prev = btinfo->functions.back (); - struct btrace_function *bfun; + struct btrace_function *prev, *bfun; /* This is an unexplained function switch. We can't really be sure about the call stack, yet the best I can think of right now is to preserve it. */ bfun = ftrace_new_function (btinfo, mfun, fun); + prev = ftrace_find_call_by_number (btinfo, bfun->number - 1); bfun->up = prev->up; bfun->flags = prev->flags; @@ -518,7 +526,7 @@ ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode, else { /* We hijack the previous function segment if it was empty. */ - bfun = btinfo->functions.back (); + bfun = &btinfo->functions.back (); if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn)) bfun = ftrace_new_function (btinfo, NULL, NULL); } @@ -559,7 +567,7 @@ ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc) return ftrace_new_function (btinfo, mfun, fun); /* If we had a gap before, we create a function. */ - bfun = btinfo->functions.back (); + bfun = &btinfo->functions.back (); if (bfun->errcode != 0) return ftrace_new_function (btinfo, mfun, fun); @@ -738,10 +746,10 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo) /* The last function segment contains the current instruction, which is not really part of the trace. 
If it contains just this one instruction, we ignore the segment. */ - if (bfun->number == length && VEC_length (btrace_insn_s, bfun->insn) == 1) + if (bfun.number == length && VEC_length (btrace_insn_s, bfun.insn) == 1) continue; - level = std::min (level, bfun->level); + level = std::min (level, bfun.level); } DEBUG_FTRACE ("setting global level offset: %d", -level); @@ -1610,7 +1618,7 @@ btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp) gdb_assert (!btinfo->functions.empty ()); gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks)); - last_bfun = btinfo->functions.back (); + last_bfun = &btinfo->functions.back (); /* If the existing trace ends with a gap, we just glue the traces together. We need to drop the last (i.e. chronologically first) block @@ -1902,10 +1910,7 @@ btrace_clear (struct thread_info *tp) btinfo = &tp->btrace; for (auto &bfun : btinfo->functions) - { - VEC_free (btrace_insn_s, bfun->insn); - xfree (bfun); - } + VEC_free (btrace_insn_s, bfun.insn); btinfo->functions.clear (); btinfo->ngaps = 0; @@ -2254,7 +2259,7 @@ btrace_insn_get (const struct btrace_insn_iterator *it) unsigned int index, end; index = it->insn_index; - bfun = it->btinfo->functions[it->call_index]; + bfun = &it->btinfo->functions[it->call_index]; /* Check if the iterator points to a gap in the trace. */ if (bfun->errcode != 0) @@ -2273,10 +2278,7 @@ btrace_insn_get (const struct btrace_insn_iterator *it) int btrace_insn_get_error (const struct btrace_insn_iterator *it) { - const struct btrace_function *bfun; - - bfun = it->btinfo->functions[it->call_index]; - return bfun->errcode; + return it->btinfo->functions[it->call_index].errcode; } /* See btrace.h. 
*/ @@ -2284,10 +2286,7 @@ btrace_insn_get_error (const struct btrace_insn_iterator *it) unsigned int btrace_insn_number (const struct btrace_insn_iterator *it) { - const struct btrace_function *bfun; - - bfun = it->btinfo->functions[it->call_index]; - return bfun->insn_offset + it->insn_index; + return it->btinfo->functions[it->call_index].insn_offset + it->insn_index; } /* See btrace.h. */ @@ -2316,7 +2315,7 @@ btrace_insn_end (struct btrace_insn_iterator *it, if (btinfo->functions.empty ()) error (_("No trace.")); - bfun = btinfo->functions.back (); + bfun = &btinfo->functions.back (); length = VEC_length (btrace_insn_s, bfun->insn); /* The last function may either be a gap or it contains the current @@ -2338,7 +2337,7 @@ btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride) const struct btrace_function *bfun; unsigned int index, steps; - bfun = it->btinfo->functions[it->call_index]; + bfun = &it->btinfo->functions[it->call_index]; steps = 0; index = it->insn_index; @@ -2420,7 +2419,7 @@ btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride) const struct btrace_function *bfun; unsigned int index, steps; - bfun = it->btinfo->functions[it->call_index]; + bfun = &it->btinfo->functions[it->call_index]; steps = 0; index = it->insn_index; @@ -2498,12 +2497,12 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it, return 0; lower = 0; - bfun = btinfo->functions[lower]; + bfun = &btinfo->functions[lower]; if (number < bfun->insn_offset) return 0; upper = btinfo->functions.size () - 1; - bfun = btinfo->functions[upper]; + bfun = &btinfo->functions[upper]; if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun)) return 0; @@ -2512,7 +2511,7 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it, { const unsigned int average = lower + (upper - lower) / 2; - bfun = btinfo->functions[average]; + bfun = &btinfo->functions[average]; if (number < bfun->insn_offset) { @@ -2546,7 +2545,7 @@ btrace_ends_with_single_insn 
(const struct btrace_thread_info *btinfo) if (btinfo->functions.empty ()) return false; - bfun = btinfo->functions.back (); + bfun = &btinfo->functions.back (); if (bfun->errcode != 0) return false; @@ -2561,7 +2560,7 @@ btrace_call_get (const struct btrace_call_iterator *it) if (it->index >= it->btinfo->functions.size ()) return NULL; - return it->btinfo->functions[it->index]; + return &it->btinfo->functions[it->index]; } /* See btrace.h. */ diff --git a/gdb/btrace.h b/gdb/btrace.h index 01f1888..1f06d6d 100644 --- a/gdb/btrace.h +++ b/gdb/btrace.h @@ -325,9 +325,10 @@ struct btrace_thread_info /* The raw branch trace data for the below branch trace. */ struct btrace_data data; - /* Vector of pointer to decoded function segments. These are in execution - order with the first element == BEGIN and the last element == END. */ - std::vector functions; + /* Vector of decoded function segments in execution flow order. Note that + the numbering for btrace function segments starts with 1, so function + segment i will be at index (i - 1). */ + std::vector functions; /* The function level offset. 
When added to each function's LEVEL, this normalizes the function levels such that the smallest level diff --git a/gdb/record-btrace.c b/gdb/record-btrace.c index a66d32a..d00ffce 100644 --- a/gdb/record-btrace.c +++ b/gdb/record-btrace.c @@ -1592,7 +1592,7 @@ record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache, gdb_assert (bfun != NULL); while (bfun->prev != 0) - bfun = cache->tp->btrace.functions[bfun->prev - 1]; + bfun = &cache->tp->btrace.functions[bfun->prev - 1]; code = get_frame_func (this_frame); special = bfun->number; @@ -1633,7 +1633,7 @@ record_btrace_frame_prev_register (struct frame_info *this_frame, throw_error (NOT_AVAILABLE_ERROR, _("No caller in btrace record history")); - caller = cache->tp->btrace.functions[bfun->up - 1]; + caller = &cache->tp->btrace.functions[bfun->up - 1]; if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0) { @@ -1679,7 +1679,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self, replay = tp->btrace.replay; if (replay != NULL) - bfun = replay->btinfo->functions[replay->call_index]; + bfun = &replay->btinfo->functions[replay->call_index]; } else { @@ -1687,7 +1687,7 @@ record_btrace_frame_sniffer (const struct frame_unwind *self, callee = btrace_get_frame_function (next); if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0) - bfun = tp->btrace.functions[callee->up - 1]; + bfun = &tp->btrace.functions[callee->up - 1]; } if (bfun == NULL) @@ -1732,7 +1732,7 @@ record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self, return 0; tinfo = find_thread_ptid (inferior_ptid); - bfun = tinfo->btrace.functions[callee->up - 1]; + bfun = &tinfo->btrace.functions[callee->up - 1]; DEBUG ("[frame] sniffed tailcall frame for %s on level %d", btrace_get_bfun_name (bfun), bfun->level); -- 2.7.4