[gdbsupport] Use task size in parallel_for_each

Ensuring a fair distribution over the worker threads and main thread
in terms of number of CUs might not be the most efficient way, given
that CUs can vary in size.

Fix this by:
- adding a task_size parameter to parallel_for_each, defaulting to
  nullptr,
- using per_cu->length () as the task size in the parallel_for_each
  in dwarf2_build_psymtabs_hard, and
- using the task size in parallel_for_each to distribute
  similarly-sized tasks to the threads.

I've used this experiment to verify the performance impact:
...
$ for n in $(seq 1 10); do \
    time gdb -q -batch ~/firefox/libxul.so-93.0-1.1.x86_64.debug \
      2>&1 \
      | grep "real:"; \
  done
...
and without the patch got:
...
real: 4.71
real: 4.88
real: 4.29
real: 4.30
real: 4.65
real: 4.27
real: 4.27
real: 4.27
real: 4.75
real: 4.41
...
and with the patch:
...
real: 3.68
real: 3.81
real: 3.80
real: 3.68
real: 3.75
real: 3.69
real: 3.69
real: 3.74
real: 3.67
real: 3.74
...
so that seems like a reasonable improvement.

With parallel_for_each_debug set to true, we get some more detail
about the difference in behaviour.  Without the patch we have:
...
Parallel for: n_elements: 2818
Parallel for: minimum elements per thread: 1
Parallel for: elts_per_thread: 704
Parallel for: elements on worker thread 0       : 705
Parallel for: elements on worker thread 1       : 705
Parallel for: elements on worker thread 2       : 704
Parallel for: elements on worker thread 3       : 0
Parallel for: elements on main thread           : 704
...
and with the patch:
...
Parallel for: n_elements: 2818
Parallel for: total_size: 1483674865
Parallel for: size_per_thread: 370918716
Parallel for: elements on worker thread 0       : 752   (size: 371811790)
Parallel for: elements on worker thread 1       : 360   (size: 371509370)
Parallel for: elements on worker thread 2       : 1130  (size: 372681710)
Parallel for: elements on worker thread 3       : 0     (size: 0)
Parallel for: elements on main thread           : 576   (size: 367671995)
...

Tested on x86_64-linux.
---
 gdb/dwarf2/read.c         |  8 +++-
 gdbsupport/parallel-for.h | 97 +++++++++++++++++++++++++++++++++++++----------
 2 files changed, 83 insertions(+), 22 deletions(-)

diff --git a/gdb/dwarf2/read.c b/gdb/dwarf2/read.c
index 42230607fe0..23c3873cba6 100644
--- a/gdb/dwarf2/read.c
+++ b/gdb/dwarf2/read.c
@@ -7067,6 +7067,12 @@ dwarf2_build_psymtabs_hard (dwarf2_per_objfile *per_objfile)
 
   using iter_type = decltype (per_bfd->all_comp_units.begin ());
 
+  auto task_size = [=] (iter_type iter)
+    {
+      dwarf2_per_cu_data *per_cu = iter->get ();
+      return per_cu->length ();
+    };
+
   /* Each thread returns a pair holding a cooked index, and a vector
      of errors that should be printed.  The latter is done because
      GDB's I/O system is not thread-safe.  run_on_main_thread could be
@@ -7095,7 +7101,7 @@ dwarf2_build_psymtabs_hard (dwarf2_per_objfile *per_objfile)
           }
       }
       return result_type (thread_storage.release (), std::move (errors));
-    });
+    }, task_size);
 
   /* Only show a given exception a single time.  */
   std::unordered_set<gdb_exception> seen_exceptions;

diff --git a/gdbsupport/parallel-for.h b/gdbsupport/parallel-for.h
index 0037ee23ff3..e6f784eafd5 100644
--- a/gdbsupport/parallel-for.h
+++ b/gdbsupport/parallel-for.h
@@ -23,6 +23,7 @@
 #include <algorithm>
 #include <type_traits>
 #include "gdbsupport/thread-pool.h"
+#include "gdbsupport/function-view.h"
 
 namespace gdb
 {
@@ -134,7 +135,9 @@ typename gdb::detail::par_for_accumulator<
     typename std::result_of<RangeFunction (RandomIt, RandomIt)>::type
   >::result_type
 parallel_for_each (unsigned n, RandomIt first, RandomIt last,
-                   RangeFunction callback)
+                   RangeFunction callback,
+                   gdb::function_view<unsigned int (RandomIt)> task_size
+                     = nullptr)
 {
   using result_type
     = typename std::result_of<RangeFunction (RandomIt, RandomIt)>::type;
@@ -148,17 +151,32 @@ parallel_for_each (unsigned n, RandomIt first, RandomIt last,
   size_t n_elements = last - first;
   size_t elts_per_thread = 0;
   size_t elts_left_over = 0;
+  size_t total_size = 0;
+  size_t size_per_thread = 0;
   if (n_threads > 1)
     {
-      /* Require that there should be at least N elements in a
-         thread.  */
-      gdb_assert (n > 0);
-      if (n_elements / n_threads < n)
-        n_threads = std::max (n_elements / n, (size_t) 1);
-      elts_per_thread = n_elements / n_threads;
-      elts_left_over = n_elements % n_threads;
-      /* n_elements == n_threads * elts_per_thread + elts_left_over. */
+      if (task_size != nullptr)
+        {
+          gdb_assert (n == 1);
+          for (RandomIt i = first; i != last; ++i)
+            {
+              size_t s = (size_t)task_size (i);
+              total_size += s;
+            }
+          size_per_thread = total_size / n_threads;
+        }
+      else
+        {
+          /* Require that there should be at least N elements in a
+             thread.  */
+          gdb_assert (n > 0);
+          if (n_elements / n_threads < n)
+            n_threads = std::max (n_elements / n, (size_t) 1);
+          elts_per_thread = n_elements / n_threads;
+          elts_left_over = n_elements % n_threads;
+          /* n_elements == n_threads * elts_per_thread + elts_left_over. */
+        }
     }
 
   size_t count = n_threads == 0 ? 0 : n_threads - 1;
@@ -167,20 +185,47 @@
   if (parallel_for_each_debug)
     {
       debug_printf (_("Parallel for: n_elements: %zu\n"), n_elements);
-      debug_printf (_("Parallel for: minimum elements per thread: %u\n"), n);
-      debug_printf (_("Parallel for: elts_per_thread: %zu\n"), elts_per_thread);
+      if (task_size != nullptr)
+        {
+          debug_printf (_("Parallel for: total_size: %zu\n"), total_size);
+          debug_printf (_("Parallel for: size_per_thread: %zu\n"), size_per_thread);
+        }
+      else
+        {
+          debug_printf (_("Parallel for: minimum elements per thread: %u\n"), n);
+          debug_printf (_("Parallel for: elts_per_thread: %zu\n"), elts_per_thread);
+        }
     }
 
+  size_t remaining_size = total_size;
   for (int i = 0; i < count; ++i)
     {
-      RandomIt end = first + elts_per_thread;
-      if (i < elts_left_over)
-        /* Distribute the leftovers over the worker threads, to avoid having
-           to handle all of them in a single thread.  */
-        end++;
+      RandomIt end;
+      size_t s = 0;
+      if (task_size == nullptr)
+        {
+          end = first + elts_per_thread;
+          if (i < elts_left_over)
+            /* Distribute the leftovers over the worker threads, to avoid having
+               to handle all of them in a single thread.  */
+            end++;
+        }
+      else
+        {
+          RandomIt j;
+          for (j = first; j < last && s < size_per_thread; ++j)
+            s += (size_t)(task_size) (j);
+          end = j;
+          remaining_size -= s;
+        }
       if (parallel_for_each_debug)
-        debug_printf (_("Parallel for: elements on worker thread %i\t: %zu\n"),
-                      i, (size_t)(end - first));
+        {
+          debug_printf (_("Parallel for: elements on worker thread %i\t: %zu"),
+                        i, (size_t)(end - first));
+          if (task_size != nullptr)
+            debug_printf (_("\t(size: %zu)"), s);
+          debug_printf (_("\n"));
+        }
       results.post (i, [=] ()
         {
           return callback (first, end);
@@ -190,12 +235,22 @@
 for (int i = count; i < n_worker_threads; ++i)
     if (parallel_for_each_debug)
-      debug_printf (_("Parallel for: elements on worker thread %i\t: 0\n"), i);
+      {
+        debug_printf (_("Parallel for: elements on worker thread %i\t: 0"), i);
+        if (task_size != nullptr)
+          debug_printf (_("\t(size: 0)"));
+        debug_printf (_("\n"));
+      }
 
   /* Process all the remaining elements in the main thread.  */
   if (parallel_for_each_debug)
-    debug_printf (_("Parallel for: elements on main thread\t\t: %zu\n"),
-                  (size_t)(last - first));
+    {
+      debug_printf (_("Parallel for: elements on main thread\t\t: %zu"),
+                    (size_t)(last - first));
+      if (task_size != nullptr)
+        debug_printf (_("\t(size: %zu)"), remaining_size);
+      debug_printf (_("\n"));
+    }
   return results.finish ([=] ()
     {
       return callback (first, last);
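
As a note for readers of this patch: the chunking scheme is easiest to see in
isolation.  Below is a minimal standalone sketch (not part of the patch; the
element sizes and thread count are invented for illustration) that mirrors
the loop added to parallel_for_each: each worker thread greedily takes
elements until its chunk reaches total_size / n_threads, and the main thread
gets the remainder:
...
#include <cstddef>
#include <cstdio>
#include <vector>

int
main ()
{
  /* Hypothetical per-element sizes, standing in for per-CU lengths.  */
  std::vector<size_t> sizes = { 100, 400, 50, 250, 300, 100, 500, 200, 100 };
  size_t n_threads = 4;

  size_t total_size = 0;
  for (size_t s : sizes)
    total_size += s;
  size_t size_per_thread = total_size / n_threads;

  /* As in parallel_for_each, n_threads - 1 worker threads each take a
     chunk, and the main thread handles whatever is left.  */
  size_t first = 0;
  for (size_t i = 0; i < n_threads - 1; ++i)
    {
      size_t end = first;
      size_t s = 0;
      /* Take elements while the chunk is still below the size target;
         the last element taken may push the chunk past it.  */
      while (end < sizes.size () && s < size_per_thread)
        s += sizes[end++];
      std::printf ("worker %zu: %zu elements (size: %zu)\n",
                   i, end - first, s);
      first = end;
    }

  size_t s = 0;
  for (size_t j = first; j < sizes.size (); ++j)
    s += sizes[j];
  std::printf ("main thread: %zu elements (size: %zu)\n",
               sizes.size () - first, s);
  return 0;
}
...
Because the loop only checks the size target before taking the next element,
a chunk can overshoot it slightly.  That is also why the debug output above
shows chunks of similar size but widely varying element counts (752 vs. 360
vs. 1130): the per-CU sizes vary, so the element count per chunk varies too.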