From: Yao Qi <qiyaoltc@gmail.com>
To: Philipp Rudo <prudo@linux.vnet.ibm.com>
Cc: gdb-patches@sourceware.org, omair.javaid@linaro.org,
yao.qi@linaro.org, peter.griffin@linaro.org,
arnez@linux.vnet.ibm.com
Subject: Re: [RFC v4 3/9] Add basic Linux kernel support
Date: Thu, 15 Jun 2017 15:23:00 -0000 [thread overview]
Message-ID: <86vanxtguc.fsf@gmail.com> (raw)
In-Reply-To: <20170612170836.25174-4-prudo@linux.vnet.ibm.com> (Philipp Rudo's message of "Mon, 12 Jun 2017 19:08:30 +0200")
Philipp Rudo <prudo@linux.vnet.ibm.com> writes:
> /* Initialize a private data entry for an address, where NAME is the name
> + of the symbol, i.e. variable name in Linux, ALIAS the name used to
> + retrieve the entry from hashtab, and SILENT a flag to determine if
> + errors should be ignored.
> +
> + Returns a pointer to the new entry. In case of an error, either returns
> + NULL (SILENT = TRUE) or throws an error (SILENT = FALSE). If SILENT = TRUE
> + the caller is responsible to check for errors.
> +
> + Do not use directly, use LK_DECLARE_* macros defined in lk-low.h instead. */
> +
> +struct lk_private_data *
> +lk_init_addr (const char *name, const char *alias, int silent)
s/int/bool/
> +{
> + struct lk_private_data *data;
> + struct bound_minimal_symbol bmsym;
> + void **new_slot;
> + void *old_slot;
> +
> + if ((old_slot = lk_find (alias)) != NULL)
> + return (struct lk_private_data *) old_slot;
> +
> + bmsym = lookup_minimal_symbol (name, NULL, NULL);
> +
> + if (bmsym.minsym == NULL)
> + {
> + if (!silent)
> + error (_("Could not find address %s. Aborting."), alias);
> + return NULL;
> + }
> +
> + data = XCNEW (struct lk_private_data);
> + data->alias = alias;
> + data->data.addr = BMSYMBOL_VALUE_ADDRESS (bmsym);
> +
> + new_slot = lk_find_slot (alias);
> + *new_slot = data;
> +
> + return data;
> +}
> +
> +/* Same as lk_init_addr but for structs. */
> +
> +struct lk_private_data *
> +lk_init_struct (const char *name, const char *alias, int silent)
Likewise, bool silent.
> +{
> + struct lk_private_data *data;
> + const struct block *global;
> + const struct symbol *sym;
> + struct type *type;
> + void **new_slot;
> + void *old_slot;
> +
> + if ((old_slot = lk_find (alias)) != NULL)
> + return (struct lk_private_data *) old_slot;
> +
> + global = block_global_block(get_selected_block (0));
> + sym = lookup_symbol (name, global, STRUCT_DOMAIN, NULL).symbol;
> +
> + if (sym != NULL)
> + {
> + type = SYMBOL_TYPE (sym);
> + goto out;
> + }
> +
> + /* Chek for "typedef struct { ... } name;"-like definitions. */
> + sym = lookup_symbol (name, global, VAR_DOMAIN, NULL).symbol;
> + if (sym == NULL)
> + goto error;
> +
> + type = check_typedef (SYMBOL_TYPE (sym));
> +
> + if (TYPE_CODE (type) == TYPE_CODE_STRUCT)
> + goto out;
These two "goto" can be removed.
> +
> +error:
> + if (!silent)
> + error (_("Could not find %s. Aborting."), alias);
> +
> + return NULL;
> +
> +out:
> + data = XCNEW (struct lk_private_data);
> + data->alias = alias;
> + data->data.type = type;
> +
> + new_slot = lk_find_slot (alias);
> + *new_slot = data;
> +
> + return data;
> +}
> +
> +
> +/* Reads a bitmap at a given ADDRess of size SIZE (in bits). Allocates and
> + returns an array of ulongs. The caller is responsible to free the array
> + after it is no longer needed. */
> +
> +ULONGEST *
> +lk_read_bitmap (CORE_ADDR addr, size_t size)
> +{
> + ULONGEST *bitmap;
> + size_t ulong_size, len;
> +
> + ulong_size = lk_builtin_type_size (unsigned_long);
> + len = LK_DIV_ROUND_UP (size, ulong_size * LK_BITS_PER_BYTE);
> + bitmap = XNEWVEC (ULONGEST, len);
> +
> + for (size_t i = 0; i < len; i++)
> + bitmap[i] = lk_read_ulong (addr + i * ulong_size);
> +
> + return bitmap;
> +}
> +
> +/* Return the next set bit in bitmap BITMAP of size SIZE (in bits)
> + starting from bit (index) BIT. Return SIZE when the end of the bitmap
> + was reached. To iterate over all set bits use macro
> + LK_BITMAP_FOR_EACH_SET_BIT defined in lk-low.h. */
> +
> +size_t
> +lk_bitmap_find_next_bit (ULONGEST *bitmap, size_t size, size_t bit)
> +{
> + size_t ulong_size, bits_per_ulong, elt;
> +
> + ulong_size = lk_builtin_type_size (unsigned_long);
> + bits_per_ulong = ulong_size * LK_BITS_PER_BYTE;
> + elt = bit / bits_per_ulong;
> +
> + while (bit < size)
> + {
> + /* FIXME: Explain why using lsb0 bit order. */
> + if (bitmap[elt] & (1UL << (bit % bits_per_ulong)))
> + return bit;
> +
> + bit++;
> + if (bit % bits_per_ulong == 0)
> + elt++;
> + }
> +
> + return size;
> +}
> +
> +/* Returns the Hamming weight, i.e. number of set bits, of bitmap BITMAP
> + with size SIZE (in bits). */
> +
> +size_t
> +lk_bitmap_hweight (ULONGEST *bitmap, size_t size)
> +{
> + size_t ulong_size, bit, bits_per_ulong, elt, retval;
> +
> + ulong_size = lk_builtin_type_size (unsigned_long);
> + bits_per_ulong = ulong_size * LK_BITS_PER_BYTE;
> + elt = bit = 0;
> + retval = 0;
> +
> + while (bit < size)
> + {
> + if (bitmap[elt] & (1 << bit % bits_per_ulong))
> + retval++;
> +
> + bit++;
> + if (bit % bits_per_ulong == 0)
> + elt++;
> + }
> +
> + return retval;
> +}
> +
Could you add some unit tests for these bitmap operations? I am
thinking that we will need a BitMap or BitVector class later.
> +/* Provide the per_cpu_offset of cpu CPU. See comment in lk-low.h for
> + details. */
> +
> +CORE_ADDR
> +lk_get_percpu_offset (unsigned int cpu)
> +{
> + size_t ulong_size = lk_builtin_type_size (unsigned_long);
> + CORE_ADDR percpu_elt;
> +
> + /* Give the architecture a chance to overwrite default behaviour. */
> + if (LK_HOOK->get_percpu_offset)
> + return LK_HOOK->get_percpu_offset (cpu);
> +
> + percpu_elt = LK_ADDR (__per_cpu_offset) + (ulong_size * cpu);
> + return lk_read_addr (percpu_elt);
> +}
> +
> +
> +
> +/* Function for targets to_update_thread_list hook. */
> +
> +static void
> +lk_update_thread_list (struct target_ops *target)
> +{
> + prune_threads ();
> + lk_update_running_tasks ();
> + lk_update_sleeping_tasks ();
> +}
> +
> +/* Function for targets to_fetch_registers hook. */
> +
> +static void
> +lk_fetch_registers (struct target_ops *target,
> + struct regcache *regcache, int regnum)
> +{
> + CORE_ADDR task;
> + unsigned int cpu;
> +
> + task = (CORE_ADDR) regcache_get_ptid (regcache).tid ();
> + cpu = lk_task_running (task);
> +
> + /* Let the target beneath fetch registers of running tasks. */
> + if (cpu != LK_CPU_INVAL)
> + {
> + struct cleanup *old_inferior_ptid;
> +
> + old_inferior_ptid = save_inferior_ptid ();
> + inferior_ptid = lk_cpu_to_old_ptid (cpu);
Use make_scoped_restore (&inferior_ptid, lk_cpu_to_old_ptid (cpu));?
> + linux_kernel_ops->beneath->to_fetch_registers (target, regcache, regnum);
> + do_cleanups (old_inferior_ptid);
> + }
> + else
> + {
> + struct gdbarch *gdbarch;
> + unsigned int i;
> +
> + LK_HOOK->get_registers (task, target, regcache, regnum);
> +
> + /* Mark all registers not found as unavailable. */
> + gdbarch = get_regcache_arch (regcache);
> + for (i = 0; i < gdbarch_num_regs (gdbarch); i++)
> + {
> + if (regcache_register_status (regcache, i) == REG_UNKNOWN)
> + regcache_raw_supply (regcache, i, NULL);
> + }
> + }
> +}
> +
> +
> +/* Functions to initialize and free target_ops and its private data. As well
This line is too long.
> + as functions for targets to_open/close/detach hooks. */
> +
> +/* Check if OBFFILE is a Linux kernel. */
> +
> +
> +/* Initialize the cpu to old ptid map. Prefer the arch dependent
> + map_running_task_to_cpu hook if provided, else assume that the PID used
> + by target beneath is the same as in task_struct PID task_struct. See
> + comment on lk_ptid_map in lk-low.h for details. */
> +
> +static void
> +lk_init_ptid_map ()
> +{
> + struct thread_info *ti;
> + ULONGEST *cpu_online_mask;
> + size_t size;
> + unsigned int cpu;
> + struct cleanup *old_chain;
> +
> + if (LK_PRIVATE->old_ptid != NULL)
> + lk_free_ptid_map ();
> +
> + size = LK_BITMAP_SIZE (cpumask);
> + cpu_online_mask = lk_read_bitmap (LK_ADDR (cpu_online_mask), size);
> + old_chain = make_cleanup (xfree, cpu_online_mask);
> +
> + ALL_THREADS (ti)
> + {
> + struct lk_ptid_map *ptid_map = XCNEW (struct lk_ptid_map);
> + CORE_ADDR rq, curr;
> + int pid;
> +
> + /* Give the architecture a chance to overwrite default behaviour. */
> + if (LK_HOOK->map_running_task_to_cpu)
> + {
> + ptid_map->cpu = LK_HOOK->map_running_task_to_cpu (ti);
> + }
> + else
> + {
> + LK_BITMAP_FOR_EACH_SET_BIT (cpu_online_mask, size, cpu)
> + {
> + rq = LK_ADDR (runqueues) + lk_get_percpu_offset (cpu);
> + curr = lk_read_addr (rq + LK_OFFSET (rq, curr));
> + pid = lk_read_int (curr + LK_OFFSET (task_struct, pid));
> +
> + if (pid == ti->ptid.lwp ())
> + {
> + ptid_map->cpu = cpu;
> + break;
> + }
> + }
> + if (cpu == size)
> + error (_("Could not map thread with pid %d, lwp %lu to a cpu."),
> + ti->ptid.pid (), ti->ptid.lwp ());
> + }
> + ptid_map->old_ptid = ti->ptid;
> + ptid_map->next = LK_PRIVATE->old_ptid;
> + LK_PRIVATE->old_ptid = ptid_map;
> + }
> +
> + do_cleanups (old_chain);
> +}
> +
> diff --git a/gdb/lk-low.h b/gdb/lk-low.h
> new file mode 100644
> index 0000000000..be8c5556df
> --- /dev/null
> +++ b/gdb/lk-low.h
> @@ -0,0 +1,310 @@
> +/* Basic Linux kernel support, architecture independent.
> +
> + Copyright (C) 2016 Free Software Foundation, Inc.
> +
> + This file is part of GDB.
> +
> + This program is free software; you can redistribute it and/or modify
> + it under the terms of the GNU General Public License as published by
> + the Free Software Foundation; either version 3 of the License, or
> + (at your option) any later version.
> +
> + This program is distributed in the hope that it will be useful,
> + but WITHOUT ANY WARRANTY; without even the implied warranty of
> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + GNU General Public License for more details.
> +
> + You should have received a copy of the GNU General Public License
> + along with this program. If not, see <http://www.gnu.org/licenses/>. */
> +
> +#ifndef __LK_LOW_H__
> +#define __LK_LOW_H__
> +
> +#include "target.h"
> +
> +extern struct target_ops *linux_kernel_ops;
> +
> +/* Copy constants defined in Linux kernel. */
> +#define LK_TASK_COMM_LEN 16
> +#define LK_BITS_PER_BYTE 8
> +
> +/* Definitions used in linux kernel target. */
> +#define LK_CPU_INVAL -1U
> +
> +/* Private data structs for this target. */
> +/* Forward declarations. */
> +struct lk_private_hooks;
> +struct lk_ptid_map;
> +
> +/* Short hand access to private data. */
> +#define LK_PRIVATE ((struct lk_private *) linux_kernel_ops->to_data)
> +#define LK_HOOK (LK_PRIVATE->hooks)
> +
> +struct lk_private
> +{
> + /* Hashtab for needed addresses, structs and fields. */
> + htab_t data;
> +
> + /* Linked list to map between cpu number and original ptid from target
> + beneath. */
> + struct lk_ptid_map *old_ptid;
> +
> + /* Hooks for architecture dependent functions. */
> + struct lk_private_hooks *hooks;
> +};
> +
> +/* We use the following convention for PTIDs:
> +
> + ptid->pid = inferiors PID
> + ptid->lwp = PID from task_stuct
> + ptid->tid = address of task_struct
> +
> + The task_structs address as TID has two reasons. First, we need it quite
> + often and there is no other reasonable way to pass it down. Second, it
> + helps us to distinguish swapper tasks as they all have PID = 0.
> +
> + Furthermore we cannot rely on the target beneath to use the same PID as the
> + task_struct. Thus we need a mapping between our PTID and the PTID of the
> + target beneath. Otherwise it is impossible to pass jobs, e.g. fetching
> + registers of running tasks, to the target beneath. */
Sorry, I don't understand this design. Can you elaborate?
> +
> +/* Private data struct to map between our and the target beneath PTID. */
> +
> +struct lk_ptid_map
> +{
> + struct lk_ptid_map *next;
Can you use C++ stl list instead?
> + unsigned int cpu;
> + ptid_t old_ptid;
> +};
> +
> +/* Private data struct to be stored in hashtab. */
> +
> +struct lk_private_data
> +{
> + const char *alias;
> +
> + union
> + {
> + CORE_ADDR addr;
> + struct type *type;
> + struct field *field;
> + } data;
> +};
> +
> +/* Wrapper for htab_hash_string to work with our private data. */
> +
> +static inline hashval_t
> +lk_hash_private_data (const struct lk_private_data *entry)
> +{
> + return htab_hash_string (entry->alias);
> +}
> +
> +/* Function for htab_eq to work with our private data. */
> +
> +static inline int
> +lk_private_data_eq (const struct lk_private_data *entry,
> + const struct lk_private_data *element)
> +{
> + return streq (entry->alias, element->alias);
> +}
> +
> +/* Wrapper for htab_find_slot to work with our private data. Do not use
> + directly, use the macros below instead. */
> +
> +static inline void **
> +lk_find_slot (const char *alias)
> +{
> + const struct lk_private_data dummy = { alias };
> + return htab_find_slot (LK_PRIVATE->data, &dummy, INSERT);
> +}
> +
> +/* Wrapper for htab_find to work with our private data. Do not use
> + directly, use the macros below instead. */
> +
> +static inline struct lk_private_data *
> +lk_find (const char *alias)
> +{
> + const struct lk_private_data dummy = { alias };
> + return (struct lk_private_data *) htab_find (LK_PRIVATE->data, &dummy);
> +}
> +
> +
> +struct lk_private_hooks
> +{
> + /* required */
> + lk_hook_get_registers get_registers;
> +
> + /* optional, required if __per_cpu_offset array is not used to determine
> + offset. */
> + lk_hook_get_percpu_offset get_percpu_offset;
> +
> + /* optional, required if the target beneath uses a different PID as struct
> + rq. */
> + lk_hook_map_running_task_to_cpu map_running_task_to_cpu;
> +};
The lk_private_hooks is still not a class. I raised this in v3 review,
https://sourceware.org/ml/gdb-patches/2017-05/msg00004.html
--
Yao (齐尧)
next prev parent reply other threads:[~2017-06-15 15:23 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-06-12 17:08 [RFC v4 0/9] Support for Linux kernel debugging Philipp Rudo
2017-06-12 17:08 ` [RFC v4 7/9] Add privileged registers for s390x Philipp Rudo
2017-06-19 13:34 ` Yao Qi
2017-06-19 15:46 ` Philipp Rudo
2017-06-12 17:08 ` [RFC v4 1/9] Convert substitute_path_component to C++ Philipp Rudo
2017-06-12 17:08 ` [RFC v4 3/9] Add basic Linux kernel support Philipp Rudo
2017-06-15 15:23 ` Yao Qi [this message]
2017-06-16 10:10 ` Philipp Rudo
2017-06-16 11:43 ` Pedro Alves
2017-06-19 7:56 ` Philipp Rudo
2017-06-19 9:52 ` Yao Qi
2017-06-19 13:35 ` Omair Javaid
2017-06-19 15:44 ` Philipp Rudo
2017-06-12 17:08 ` [RFC v4 4/9] Add kernel module support for linux-kernel target Philipp Rudo
2017-06-12 17:08 ` [RFC v4 2/9] Add libiberty/concat styled concat_path function Philipp Rudo
2017-06-12 17:08 ` [RFC v4 5/9] Add commands for linux-kernel target Philipp Rudo
2017-06-12 17:08 ` [RFC v4 8/9] Link frame_info to thread_info Philipp Rudo
2017-06-19 13:07 ` Yao Qi
2017-06-19 15:46 ` Philipp Rudo
2017-06-12 17:09 ` [RFC v4 6/9] Separate common s390-tdep.* from s390-linux-tdep.* Philipp Rudo
2017-06-12 17:39 ` [RFC v4 9/9] Add S390 support for linux-kernel target Philipp Rudo
2017-06-19 13:20 ` Yao Qi
2017-06-19 15:45 ` Philipp Rudo
2017-06-12 21:17 ` Philipp Rudo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=86vanxtguc.fsf@gmail.com \
--to=qiyaoltc@gmail.com \
--cc=arnez@linux.vnet.ibm.com \
--cc=gdb-patches@sourceware.org \
--cc=omair.javaid@linaro.org \
--cc=peter.griffin@linaro.org \
--cc=prudo@linux.vnet.ibm.com \
--cc=yao.qi@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox