Mirror of the gdb-patches mailing list
 help / color / mirror / Atom feed
From: Simon Marchi <simon.marchi@efficios.com>
To: gdb-patches@sourceware.org
Cc: Simon Marchi <simon.marchi@efficios.com>
Subject: [PATCH 4/6] gdbsupport: factor out work queue from parallel-for.h
Date: Mon,  5 May 2025 16:15:28 -0400	[thread overview]
Message-ID: <20250505201548.184917-4-simon.marchi@efficios.com> (raw)
In-Reply-To: <20250505201548.184917-1-simon.marchi@efficios.com>

In preparation for a following patch that will re-use the shared work
queue algorithm, move it to a separate class.

Change-Id: Id05cf8898a5d162048fa8fa056fbf7e0441bfb78
---
 gdbsupport/parallel-for.h | 45 +++++--------------
 gdbsupport/work-queue.h   | 95 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 107 insertions(+), 33 deletions(-)
 create mode 100644 gdbsupport/work-queue.h

diff --git a/gdbsupport/parallel-for.h b/gdbsupport/parallel-for.h
index bb41eb8700f0..c5a15d3235a7 100644
--- a/gdbsupport/parallel-for.h
+++ b/gdbsupport/parallel-for.h
@@ -23,6 +23,7 @@
 #include <algorithm>
 #include <tuple>
 #include "gdbsupport/thread-pool.h"
+#include "gdbsupport/work-queue.h"
 
 namespace gdb
 {
@@ -57,12 +58,8 @@ parallel_for_each (const RandomIt first, const RandomIt last,
       debug_printf ("Parallel for: batch size: %zu\n", batch_size);
     }
 
-  const size_t n_worker_threads
-    = std::max<size_t> (thread_pool::g_thread_pool->thread_count (), 1);
   std::vector<gdb::future<void>> results;
-
-  /* The next item to hand out.  */
-  std::atomic<RandomIt> next = first;
+  work_queue<RandomIt, batch_size> queue (first, last);
 
   /* The worker thread task.
 
@@ -75,49 +72,31 @@ parallel_for_each (const RandomIt first, const RandomIt last,
      and `args` can be used as-is in the lambda.  */
   auto args_tuple
     = std::forward_as_tuple (std::forward<WorkerArgs> (worker_args)...);
-  auto task =
-    [&next, first, last, n_worker_threads, &args_tuple] () {
+  auto task = [&queue, first, &args_tuple] () {
       /* Instantiate the user-defined worker.  */
       auto worker = std::make_from_tuple<Worker> (args_tuple);
 
       for (;;)
 	{
-	  /* Grab a snapshot of NEXT.  */
-	  auto local_next = next.load ();
-	  gdb_assert (local_next <= last);
+	  const auto [batch_first, batch_last] = queue.pop_batch ();
 
-	  /* Number of remaining items.  */
-	  auto n_remaining = last - local_next;
-	  gdb_assert (n_remaining >= 0);
-
-	  /* Are we done?  */
-	  if (n_remaining == 0)
+	  if (batch_first == batch_last)
 	    break;
 
-	  const auto this_batch_size
-	    = std::min<std::size_t> (batch_size, n_remaining);
-
-	  /* The range to process in this iteration.  */
-	  const auto this_batch_first = local_next;
-	  const auto this_batch_last = local_next + this_batch_size;
-
-	  /* Update NEXT.  If the current value of NEXT doesn't match
-	     LOCAL_NEXT, it means another thread updated it concurrently,
-	     restart.  */
-	  if (!next.compare_exchange_weak (local_next, this_batch_last))
-	    continue;
-
 	  if (parallel_for_each_debug)
 	    debug_printf ("Processing %zu items, range [%zu, %zu[\n",
-			  this_batch_size,
-			  static_cast<size_t> (this_batch_first - first),
-			  static_cast<size_t> (this_batch_last - first));
+			  static_cast<std::size_t> (batch_last - batch_first),
+			  static_cast<std::size_t> (batch_first - first),
+			  static_cast<std::size_t> (batch_last - first));
 
-	  worker (this_batch_first, this_batch_last);
+	  worker (batch_first, batch_last);
 	}
     };
 
   /* Start N_WORKER_THREADS tasks.  */
+  const size_t n_worker_threads
+    = std::max<size_t> (thread_pool::g_thread_pool->thread_count (), 1);
+
   for (int i = 0; i < n_worker_threads; ++i)
     results.push_back (gdb::thread_pool::g_thread_pool->post_task (task));
 
diff --git a/gdbsupport/work-queue.h b/gdbsupport/work-queue.h
new file mode 100644
index 000000000000..f84b29a4f523
--- /dev/null
+++ b/gdbsupport/work-queue.h
@@ -0,0 +1,95 @@
+/* Synchronized work queue.
+
+   Copyright (C) 2025 Free Software Foundation, Inc.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef GDBSUPPORT_WORK_QUEUE_H
+#define GDBSUPPORT_WORK_QUEUE_H
+
+namespace gdb
+{
+
+/* Implementation of a thread-safe work queue.
+
+   The work items are specified by two iterators of type RandomIt.
+
+   BATCH_SIZE is the number of work items to pop in a batch.  */
+
+template<typename RandomIt, std::size_t batch_size>
+class work_queue
+{
+public:
+  /* The work items are specified by the range `[first, last[`.  */
+  work_queue (RandomIt first, RandomIt last) noexcept
+    : m_next (first),
+      m_last (last)
+  {
+    gdb_assert (first <= last);
+  }
+
+  DISABLE_COPY_AND_ASSIGN (work_queue);
+
+  /* Pop a batch of work items.
+
+     The return value is a pair containing iterators to the first and last
+     (exclusive) work items.  */
+  std::pair<RandomIt, RandomIt> pop_batch () noexcept
+  {
+    for (;;)
+      {
+	/* Grab a snapshot of M_NEXT.  */
+	auto next = m_next.load ();
+	gdb_assert (next <= m_last);
+
+	/* The number of items remaining in the queue.  */
+	const auto n_remaining = static_cast<std::size_t> (m_last - next);
+
+	/* Are we done?  */
+	if (n_remaining == 0)
+	  return std::make_pair (m_last, m_last);
+
+	/* The batch size is proportional to the number of items remaining in
+	   the queue.  We do this to try to strike a balance, avoiding
+	   synchronization overhead when there are many items to process at the
+	   start, and avoiding workload imbalance when there are few items to
+	   process at the end.  */
+	const auto this_batch_size = std::min (batch_size, n_remaining);
+
+	/* The range of items in this batch.  */
+	const auto this_batch_first = next;
+	const auto this_batch_last = next + this_batch_size;
+
+	/* Update M_NEXT.  If the current value of M_NEXT doesn't match NEXT, it
+	   means another thread updated it concurrently, restart.  */
+	if (!m_next.compare_exchange_weak (next, this_batch_last))
+	  continue;
+
+	return std::make_pair (this_batch_first, this_batch_last);
+      }
+  }
+
+private:
+  /* The next work item to hand out.  */
+  std::atomic<RandomIt> m_next;
+
+  /* The end of the work item range.  */
+  RandomIt m_last;
+};
+
+} /* namespace gdb */
+
+#endif /* GDBSUPPORT_WORK_QUEUE_H */
-- 
2.49.0


  parent reply	other threads:[~2025-05-05 20:17 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-05-05 20:15 [PATCH 1/6] gdb: re-work parallel-for-selftests.c Simon Marchi
2025-05-05 20:15 ` [PATCH 2/6] gdbsupport: make gdb::parallel_for_each's n parameter a template parameter Simon Marchi
2025-06-13 17:55   ` Tom Tromey
2025-06-13 18:43     ` Simon Marchi
2025-05-05 20:15 ` [PATCH 3/6] gdbsupport: use dynamic partitioning in gdb::parallel_for_each Simon Marchi
2025-06-13 18:29   ` Tom Tromey
2025-06-13 19:22     ` Simon Marchi
2025-06-13 19:56       ` Simon Marchi
2025-06-13 20:12         ` Simon Marchi
2025-06-26 19:27           ` Simon Marchi
2025-07-03 19:23             ` Tom Tromey
2025-07-03 19:36               ` Simon Marchi
2025-05-05 20:15 ` Simon Marchi [this message]
2025-06-13 18:33   ` [PATCH 4/6] gdbsupport: factor out work queue from parallel-for.h Tom Tromey
2025-06-13 19:24     ` Simon Marchi
2025-05-05 20:15 ` [PATCH 5/6] gdbsupport: add async parallel_for_each version Simon Marchi
2025-06-13 18:39   ` Tom Tromey
2025-06-13 19:29     ` Simon Marchi
2025-05-05 20:15 ` [PATCH 6/6] gdb/dwarf: use dynamic partitioning for DWARF CU indexing Simon Marchi
2025-05-27 14:44 ` [PATCH 1/6] gdb: re-work parallel-for-selftests.c Simon Marchi
2025-06-13 17:48 ` Tom Tromey
2025-06-13 18:38   ` Simon Marchi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250505201548.184917-4-simon.marchi@efficios.com \
    --to=simon.marchi@efficios.com \
    --cc=gdb-patches@sourceware.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox