From: simon.marchi@polymtl.ca
To: gdb-patches@sourceware.org
Cc: Simon Marchi <simon.marchi@efficios.com>
Subject: [PATCH v2 2/4] gdbsupport: factor out work queue from parallel-for.h
Date: Thu, 3 Jul 2025 16:01:17 -0400 [thread overview]
Message-ID: <20250703200130.4095761-2-simon.marchi@polymtl.ca> (raw)
In-Reply-To: <20250703200130.4095761-1-simon.marchi@polymtl.ca>
From: Simon Marchi <simon.marchi@efficios.com>
New in v2: add some consts
In preparation for a following patch that will re-use the shared work
queue algorithm, move it to a separate class.
Change-Id: Id05cf8898a5d162048fa8fa056fbf7e0441bfb78
---
gdbsupport/parallel-for.h | 45 +++++--------------
gdbsupport/work-queue.h | 95 +++++++++++++++++++++++++++++++++++++++
2 files changed, 107 insertions(+), 33 deletions(-)
create mode 100644 gdbsupport/work-queue.h
diff --git a/gdbsupport/parallel-for.h b/gdbsupport/parallel-for.h
index bb41eb8700f0..c5a15d3235a7 100644
--- a/gdbsupport/parallel-for.h
+++ b/gdbsupport/parallel-for.h
@@ -23,6 +23,7 @@
#include <algorithm>
#include <tuple>
#include "gdbsupport/thread-pool.h"
+#include "gdbsupport/work-queue.h"
namespace gdb
{
@@ -57,12 +58,8 @@ parallel_for_each (const RandomIt first, const RandomIt last,
debug_printf ("Parallel for: batch size: %zu\n", batch_size);
}
- const size_t n_worker_threads
- = std::max<size_t> (thread_pool::g_thread_pool->thread_count (), 1);
std::vector<gdb::future<void>> results;
-
- /* The next item to hand out. */
- std::atomic<RandomIt> next = first;
+ work_queue<RandomIt, batch_size> queue (first, last);
/* The worker thread task.
@@ -75,49 +72,31 @@ parallel_for_each (const RandomIt first, const RandomIt last,
and `args` can be used as-is in the lambda. */
auto args_tuple
= std::forward_as_tuple (std::forward<WorkerArgs> (worker_args)...);
- auto task =
- [&next, first, last, n_worker_threads, &args_tuple] () {
+ auto task = [&queue, first, &args_tuple] () {
/* Instantiate the user-defined worker. */
auto worker = std::make_from_tuple<Worker> (args_tuple);
for (;;)
{
- /* Grab a snapshot of NEXT. */
- auto local_next = next.load ();
- gdb_assert (local_next <= last);
+ const auto [batch_first, batch_last] = queue.pop_batch ();
- /* Number of remaining items. */
- auto n_remaining = last - local_next;
- gdb_assert (n_remaining >= 0);
-
- /* Are we done? */
- if (n_remaining == 0)
+ if (batch_first == batch_last)
break;
- const auto this_batch_size
- = std::min<std::size_t> (batch_size, n_remaining);
-
- /* The range to process in this iteration. */
- const auto this_batch_first = local_next;
- const auto this_batch_last = local_next + this_batch_size;
-
- /* Update NEXT. If the current value of NEXT doesn't match
- LOCAL_NEXT, it means another thread updated it concurrently,
- restart. */
- if (!next.compare_exchange_weak (local_next, this_batch_last))
- continue;
-
if (parallel_for_each_debug)
debug_printf ("Processing %zu items, range [%zu, %zu[\n",
- this_batch_size,
- static_cast<size_t> (this_batch_first - first),
- static_cast<size_t> (this_batch_last - first));
+ static_cast<std::size_t> (batch_last - batch_first),
+ static_cast<std::size_t> (batch_first - first),
+ static_cast<std::size_t> (batch_last - first));
- worker (this_batch_first, this_batch_last);
+ worker (batch_first, batch_last);
}
};
/* Start N_WORKER_THREADS tasks. */
+ const size_t n_worker_threads
+ = std::max<size_t> (thread_pool::g_thread_pool->thread_count (), 1);
+
for (int i = 0; i < n_worker_threads; ++i)
results.push_back (gdb::thread_pool::g_thread_pool->post_task (task));
diff --git a/gdbsupport/work-queue.h b/gdbsupport/work-queue.h
new file mode 100644
index 000000000000..f84b29a4f523
--- /dev/null
+++ b/gdbsupport/work-queue.h
@@ -0,0 +1,95 @@
+/* Synchronized work queue.
+
+ Copyright (C) 2025 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef GDBSUPPORT_WORK_QUEUE_H
+#define GDBSUPPORT_WORK_QUEUE_H
+
+namespace gdb
+{
+
+/* Implementation of a thread-safe work queue.
+
+ The work items are specified by two iterators of type RandomIt.
+
+ BATCH_SIZE is the number of work items to pop in a batch. */
+
+template<typename RandomIt, std::size_t batch_size>
+class work_queue
+{
+public:
+ /* The work items are specified by the range `[first, last[`. */
+ work_queue (RandomIt first, RandomIt last) noexcept
+ : m_next (first),
+ m_last (last)
+ {
+ gdb_assert (first <= last);
+ }
+
+ DISABLE_COPY_AND_ASSIGN (work_queue);
+
+  /* Pop a batch of work items.
+
+     The return value is a pair of iterators delimiting the batch: the first
+     work item and one past the last.  An empty batch means the queue is empty. */
+ std::pair<RandomIt, RandomIt> pop_batch () noexcept
+ {
+ for (;;)
+ {
+ /* Grab a snapshot of M_NEXT. */
+ auto next = m_next.load ();
+ gdb_assert (next <= m_last);
+
+ /* The number of items remaining in the queue. */
+ const auto n_remaining = static_cast<std::size_t> (m_last - next);
+
+ /* Are we done? */
+ if (n_remaining == 0)
+ return std::make_pair (m_last, m_last);
+
+ /* The batch size is proportional to the number of items remaining in
+      the queue.  We do this to try to strike a balance, avoiding
+ synchronization overhead when there are many items to process at the
+ start, and avoiding workload imbalance when there are few items to
+ process at the end. */
+ const auto this_batch_size = std::min (batch_size, n_remaining);
+
+ /* The range of items in this batch. */
+ const auto this_batch_first = next;
+ const auto this_batch_last = next + this_batch_size;
+
+ /* Update M_NEXT. If the current value of M_NEXT doesn't match NEXT, it
+ means another thread updated it concurrently, restart. */
+ if (!m_next.compare_exchange_weak (next, this_batch_last))
+ continue;
+
+ return std::make_pair (this_batch_first, this_batch_last);
+ }
+ }
+
+private:
+ /* The next work item to hand out. */
+ std::atomic<RandomIt> m_next;
+
+ /* The end of the work item range. */
+ RandomIt m_last;
+};
+
+} /* namespace gdb */
+
+#endif /* GDBSUPPORT_WORK_QUEUE_H */
--
2.50.0
next prev parent reply other threads:[~2025-07-03 20:02 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-07-03 20:01 [PATCH v2 1/4] gdbsupport: use dynamic partitioning in gdb::parallel_for_each simon.marchi
2025-07-03 20:01 ` simon.marchi [this message]
2025-09-18 19:07 ` [PATCH v2 2/4] gdbsupport: factor out work queue from parallel-for.h Tom Tromey
2025-09-19 18:06 ` Simon Marchi
2025-07-03 20:01 ` [PATCH v2 3/4] gdbsupport: add async parallel_for_each version simon.marchi
2025-09-18 19:16 ` Tom Tromey
2025-09-19 20:12 ` Simon Marchi
2025-07-03 20:01 ` [PATCH v2 4/4] gdb/dwarf: use dynamic partitioning for DWARF CU indexing simon.marchi
2025-09-18 19:26 ` Tom Tromey
2025-09-18 19:46 ` Simon Marchi
2025-09-18 19:53 ` Tom Tromey
2025-09-19 20:15 ` Simon Marchi
2025-08-05 17:01 ` [PATCH v2 1/4] gdbsupport: use dynamic partitioning in gdb::parallel_for_each Simon Marchi
2025-08-28 17:00 ` Simon Marchi
2025-09-08 17:23 ` Simon Marchi
2025-09-18 18:59 ` Tom Tromey
2025-09-19 17:47 ` Simon Marchi
2025-09-19 18:04 ` Tom Tromey
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250703200130.4095761-2-simon.marchi@polymtl.ca \
--to=simon.marchi@polymtl.ca \
--cc=gdb-patches@sourceware.org \
--cc=simon.marchi@efficios.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox