Rename llvm::ThreadPool -> llvm::DefaultThreadPool (NFC) #83702
Conversation
@llvm/pr-subscribers-clang @llvm/pr-subscribers-debuginfo
Author: Mehdi Amini (joker-eph)
Changes
This is a breaking change: clients who used to create a ThreadPool must now create a DefaultThreadPool.
Patch is 38.32 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/83702.diff
41 Files Affected:
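For downstream code the rename is mechanical. A minimal before/after sketch (the strategy argument and the lambda are illustrative, not taken from any particular caller):

    #include "llvm/Support/ThreadPool.h"
    #include "llvm/Support/Threading.h"

    void example() {
      // Before this PR the concrete pool was spelled llvm::ThreadPool:
      //   llvm::ThreadPool Pool(llvm::hardware_concurrency());
      // After this PR the concrete default implementation is DefaultThreadPool;
      // llvm::ThreadPool now names the abstract interface instead.
      llvm::DefaultThreadPool Pool(llvm::hardware_concurrency());
      auto Done = Pool.async([] { /* some work */ });
      Done.wait(); // wait on the returned future...
      Pool.wait(); // ...or drain the whole pool
    }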
diff --git a/bolt/lib/Core/ParallelUtilities.cpp b/bolt/lib/Core/ParallelUtilities.cpp
index 1a28bc4346ecd56..1f5ac5655d9f98d 100644
--- a/bolt/lib/Core/ParallelUtilities.cpp
+++ b/bolt/lib/Core/ParallelUtilities.cpp
@@ -106,7 +106,7 @@ ThreadPool &getThreadPool() {
if (ThreadPoolPtr.get())
return *ThreadPoolPtr;
- ThreadPoolPtr = std::make_unique<ThreadPool>(
+ ThreadPoolPtr = std::make_unique<DefaultThreadPool>(
llvm::hardware_concurrency(opts::ThreadCount));
return *ThreadPoolPtr;
}
diff --git a/bolt/tools/merge-fdata/merge-fdata.cpp b/bolt/tools/merge-fdata/merge-fdata.cpp
index c6dfd3cfdc56dea..f2ac5ad4492ee52 100644
--- a/bolt/tools/merge-fdata/merge-fdata.cpp
+++ b/bolt/tools/merge-fdata/merge-fdata.cpp
@@ -316,7 +316,7 @@ void mergeLegacyProfiles(const SmallVectorImpl<std::string> &Filenames) {
// least 4 tasks.
ThreadPoolStrategy S = optimal_concurrency(
std::max(Filenames.size() / 4, static_cast<size_t>(1)));
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
DenseMap<llvm::thread::id, ProfileTy> ParsedProfiles(
Pool.getMaxConcurrency());
for (const auto &Filename : Filenames)
diff --git a/lld/MachO/Writer.cpp b/lld/MachO/Writer.cpp
index 65b598d1d7c422a..9b0a32c136e8b17 100644
--- a/lld/MachO/Writer.cpp
+++ b/lld/MachO/Writer.cpp
@@ -66,7 +66,7 @@ class Writer {
template <class LP> void run();
- ThreadPool threadPool;
+ DefaultThreadPool threadPool;
std::unique_ptr<FileOutputBuffer> &buffer;
uint64_t addr = 0;
uint64_t fileOff = 0;
diff --git a/lldb/include/lldb/Core/Debugger.h b/lldb/include/lldb/Core/Debugger.h
index 418c2403d020f48..9c5ea22242f18d3 100644
--- a/lldb/include/lldb/Core/Debugger.h
+++ b/lldb/include/lldb/Core/Debugger.h
@@ -52,7 +52,7 @@
namespace llvm {
class raw_ostream;
-class ThreadPoolInterface;
+class ThreadPool;
} // namespace llvm
namespace lldb_private {
@@ -500,7 +500,7 @@ class Debugger : public std::enable_shared_from_this<Debugger>,
}
/// Shared thread pool. Use only with ThreadPoolTaskGroup.
- static llvm::ThreadPoolInterface &GetThreadPool();
+ static llvm::ThreadPool &GetThreadPool();
/// Report warning events.
///
diff --git a/lldb/source/Core/Debugger.cpp b/lldb/source/Core/Debugger.cpp
index 217474d1060ac28..c4b3c64e0844b05 100644
--- a/lldb/source/Core/Debugger.cpp
+++ b/lldb/source/Core/Debugger.cpp
@@ -609,7 +609,7 @@ void Debugger::Initialize(LoadPluginCallbackType load_plugin_callback) {
"Debugger::Initialize called more than once!");
g_debugger_list_mutex_ptr = new std::recursive_mutex();
g_debugger_list_ptr = new DebuggerList();
- g_thread_pool = new llvm::ThreadPool(llvm::optimal_concurrency());
+ g_thread_pool = new llvm::DefaultThreadPool(llvm::optimal_concurrency());
g_load_plugin_callback = load_plugin_callback;
}
diff --git a/llvm/docs/ORCv2.rst b/llvm/docs/ORCv2.rst
index add05e05a80e5fd..910ef5b9f3d02f8 100644
--- a/llvm/docs/ORCv2.rst
+++ b/llvm/docs/ORCv2.rst
@@ -738,7 +738,7 @@ or creating any Modules attached to it. E.g.
ThreadSafeContext TSCtx(std::make_unique<LLVMContext>());
- ThreadPool TP(NumThreads);
+ DefaultThreadPool TP(NumThreads);
JITStack J;
for (auto &ModulePath : ModulePaths) {
diff --git a/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp b/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp
index fdd376d82da5d89..0d97d379d2279e1 100644
--- a/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp
+++ b/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp
@@ -136,7 +136,7 @@ class SpeculativeJIT {
std::unique_ptr<ExecutionSession> ES;
DataLayout DL;
MangleAndInterner Mangle{*ES, DL};
- ThreadPool CompileThreads{llvm::hardware_concurrency(NumThreads)};
+ DefaultThreadPool CompileThreads{llvm::hardware_concurrency(NumThreads)};
JITDylib &MainJD;
diff --git a/llvm/include/llvm/Debuginfod/Debuginfod.h b/llvm/include/llvm/Debuginfod/Debuginfod.h
index 99fe15ad859794c..ef03948a706c06d 100644
--- a/llvm/include/llvm/Debuginfod/Debuginfod.h
+++ b/llvm/include/llvm/Debuginfod/Debuginfod.h
@@ -97,7 +97,7 @@ Expected<std::string> getCachedOrDownloadArtifact(
StringRef UniqueKey, StringRef UrlPath, StringRef CacheDirectoryPath,
ArrayRef<StringRef> DebuginfodUrls, std::chrono::milliseconds Timeout);
-class ThreadPoolInterface;
+class ThreadPool;
struct DebuginfodLogEntry {
std::string Message;
@@ -135,7 +135,7 @@ class DebuginfodCollection {
// error.
Expected<bool> updateIfStale();
DebuginfodLog &Log;
- ThreadPoolInterface &Pool;
+ ThreadPool &Pool;
Timer UpdateTimer;
sys::Mutex UpdateMutex;
@@ -145,7 +145,7 @@ class DebuginfodCollection {
public:
DebuginfodCollection(ArrayRef<StringRef> Paths, DebuginfodLog &Log,
- ThreadPoolInterface &Pool, double MinInterval);
+ ThreadPool &Pool, double MinInterval);
Error update();
Error updateForever(std::chrono::milliseconds Interval);
Expected<std::string> findDebugBinaryPath(object::BuildIDRef);
diff --git a/llvm/include/llvm/Support/BalancedPartitioning.h b/llvm/include/llvm/Support/BalancedPartitioning.h
index 9738e742f7f1e96..a8464ac0fe60e58 100644
--- a/llvm/include/llvm/Support/BalancedPartitioning.h
+++ b/llvm/include/llvm/Support/BalancedPartitioning.h
@@ -50,7 +50,7 @@
namespace llvm {
-class ThreadPoolInterface;
+class ThreadPool;
/// A function with a set of utility nodes where it is beneficial to order two
/// functions close together if they have similar utility nodes
class BPFunctionNode {
@@ -115,7 +115,7 @@ class BalancedPartitioning {
/// threads, so we need to track how many active threads that could spawn more
/// threads.
struct BPThreadPool {
- ThreadPoolInterface &TheThreadPool;
+ ThreadPool &TheThreadPool;
std::mutex mtx;
std::condition_variable cv;
/// The number of threads that could spawn more threads
@@ -128,8 +128,7 @@ class BalancedPartitioning {
/// acceptable for other threads to add more tasks while blocking on this
/// call.
void wait();
- BPThreadPool(ThreadPoolInterface &TheThreadPool)
- : TheThreadPool(TheThreadPool) {}
+ BPThreadPool(ThreadPool &TheThreadPool) : TheThreadPool(TheThreadPool) {}
};
/// Run a recursive bisection of a given list of FunctionNodes
diff --git a/llvm/include/llvm/Support/ThreadPool.h b/llvm/include/llvm/Support/ThreadPool.h
index 93f02729f047aa0..736c88964ca12ef 100644
--- a/llvm/include/llvm/Support/ThreadPool.h
+++ b/llvm/include/llvm/Support/ThreadPool.h
@@ -46,7 +46,7 @@ class ThreadPoolTaskGroup;
/// available threads are used up by tasks waiting for a task that has no thread
/// left to run on (this includes waiting on the returned future). It should be
/// generally safe to wait() for a group as long as groups do not form a cycle.
-class ThreadPoolInterface {
+class ThreadPool {
/// The actual method to enqueue a task to be defined by the concrete
/// implementation.
virtual void asyncEnqueue(std::function<void()> Task,
@@ -55,7 +55,7 @@ class ThreadPoolInterface {
public:
/// Destroying the pool will drain the pending tasks and wait. The current
/// thread may participate in the execution of the pending tasks.
- virtual ~ThreadPoolInterface();
+ virtual ~ThreadPool();
/// Blocking wait for all the threads to complete and the queue to be empty.
/// It is an error to try to add new tasks while blocking on this call.
@@ -121,7 +121,7 @@ class ThreadPoolInterface {
///
/// The pool keeps a vector of threads alive, waiting on a condition variable
/// for some work to become available.
-class StdThreadPool : public ThreadPoolInterface {
+class StdThreadPool : public ThreadPool {
public:
/// Construct a pool using the hardware strategy \p S for mapping hardware
/// execution resources (threads, cores, CPUs)
@@ -212,11 +212,10 @@ class StdThreadPool : public ThreadPoolInterface {
/// Maximum number of threads to potentially grow this pool to.
const unsigned MaxThreadCount;
};
-
-#endif // LLVM_ENABLE_THREADS Disabled
+#endif // LLVM_ENABLE_THREADS
/// A non-threaded implementation.
-class SingleThreadExecutor : public ThreadPoolInterface {
+class SingleThreadExecutor : public ThreadPool {
public:
/// Construct a non-threaded pool, ignoring using the hardware strategy.
SingleThreadExecutor(ThreadPoolStrategy ignored = {});
@@ -253,9 +252,9 @@ class SingleThreadExecutor : public ThreadPoolInterface {
};
#if LLVM_ENABLE_THREADS
-using ThreadPool = StdThreadPool;
+using DefaultThreadPool = StdThreadPool;
#else
-using ThreadPool = SingleThreadExecutor;
+using DefaultThreadPool = SingleThreadExecutor;
#endif
/// A group of tasks to be run on a thread pool. Thread pool tasks in different
@@ -265,7 +264,7 @@ using ThreadPool = SingleThreadExecutor;
class ThreadPoolTaskGroup {
public:
/// The ThreadPool argument is the thread pool to forward calls to.
- ThreadPoolTaskGroup(ThreadPoolInterface &Pool) : Pool(Pool) {}
+ ThreadPoolTaskGroup(ThreadPool &Pool) : Pool(Pool) {}
/// Blocking destructor: will wait for all the tasks in the group to complete
/// by calling ThreadPool::wait().
@@ -282,7 +281,7 @@ class ThreadPoolTaskGroup {
void wait() { Pool.wait(*this); }
private:
- ThreadPoolInterface &Pool;
+ ThreadPool &Pool;
};
} // namespace llvm
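After this hunk, llvm::ThreadPool names the abstract interface and DefaultThreadPool aliases whichever concrete implementation LLVM_ENABLE_THREADS selects. A hedged sketch of the resulting split between interface users and pool owners (the helper function is hypothetical):

    // APIs that only schedule work can take any implementation by reference:
    void runPair(llvm::ThreadPool &Pool) {
      llvm::ThreadPoolTaskGroup Group(Pool);
      Group.async([] { /* task A */ });
      Group.async([] { /* task B */ });
      Group.wait(); // waits only for the tasks in this group
    }

    // Code that owns the pool instantiates the concrete alias, e.g.:
    //   llvm::DefaultThreadPool Pool(llvm::hardware_concurrency());
    //   runPair(Pool);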
diff --git a/llvm/lib/CodeGen/ParallelCG.cpp b/llvm/lib/CodeGen/ParallelCG.cpp
index 43b23368ead2706..ceb64b2badab566 100644
--- a/llvm/lib/CodeGen/ParallelCG.cpp
+++ b/llvm/lib/CodeGen/ParallelCG.cpp
@@ -52,7 +52,7 @@ void llvm::splitCodeGen(
// Create ThreadPool in nested scope so that threads will be joined
// on destruction.
{
- ThreadPool CodegenThreadPool(hardware_concurrency(OSs.size()));
+ DefaultThreadPool CodegenThreadPool(hardware_concurrency(OSs.size()));
int ThreadCount = 0;
SplitModule(
diff --git a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
index 4f5a4e2ffc702ae..9b581a6c9ab7744 100644
--- a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
+++ b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
@@ -2935,7 +2935,7 @@ Error DWARFLinker::link() {
}
EmitLambda();
} else {
- ThreadPool Pool(hardware_concurrency(2));
+ DefaultThreadPool Pool(hardware_concurrency(2));
Pool.async(AnalyzeAll);
Pool.async(CloneAll);
Pool.wait();
diff --git a/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp b/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp
index a052969e74c0f8d..49b08997eb9c1c1 100644
--- a/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp
+++ b/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp
@@ -192,7 +192,7 @@ Error DWARFLinkerImpl::link() {
Context->InputDWARFFile.unload();
}
} else {
- ThreadPool Pool(llvm::parallel::strategy);
+ DefaultThreadPool Pool(llvm::parallel::strategy);
for (std::unique_ptr<LinkContext> &Context : ObjectContexts)
Pool.async([&]() {
// Link object file.
diff --git a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
index 3a28cd412de9291..ff6b560d11726b6 100644
--- a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
+++ b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
@@ -601,7 +601,7 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) {
// Now parse all DIEs in case we have cross compile unit references in a
// thread pool.
- ThreadPool pool(hardware_concurrency(NumThreads));
+ DefaultThreadPool pool(hardware_concurrency(NumThreads));
for (const auto &CU : DICtx.compile_units())
pool.async([&CU]() { CU->getUnitDIE(false /*CUDieOnly*/); });
pool.wait();
diff --git a/llvm/lib/Debuginfod/Debuginfod.cpp b/llvm/lib/Debuginfod/Debuginfod.cpp
index 4c785117ae8ef77..1cf550721000eb3 100644
--- a/llvm/lib/Debuginfod/Debuginfod.cpp
+++ b/llvm/lib/Debuginfod/Debuginfod.cpp
@@ -348,8 +348,7 @@ DebuginfodLogEntry DebuginfodLog::pop() {
}
DebuginfodCollection::DebuginfodCollection(ArrayRef<StringRef> PathsRef,
- DebuginfodLog &Log,
- ThreadPoolInterface &Pool,
+ DebuginfodLog &Log, ThreadPool &Pool,
double MinInterval)
: Log(Log), Pool(Pool), MinInterval(MinInterval) {
for (StringRef Path : PathsRef)
diff --git a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
index 833dcb9d5bf2e72..79adda5b7bc0341 100644
--- a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
@@ -972,8 +972,8 @@ LLJIT::LLJIT(LLJITBuilderState &S, Error &Err)
if (S.NumCompileThreads > 0) {
InitHelperTransformLayer->setCloneToNewContextOnEmit(true);
- CompileThreads =
- std::make_unique<ThreadPool>(hardware_concurrency(S.NumCompileThreads));
+ CompileThreads = std::make_unique<DefaultThreadPool>(
+ hardware_concurrency(S.NumCompileThreads));
ES->setDispatchTask([this](std::unique_ptr<Task> T) {
// FIXME: We should be able to use move-capture here, but ThreadPool's
// AsyncTaskTys are std::functions rather than unique_functions
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 34a49c8588b2f73..9c93ec70da77643 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -1409,7 +1409,7 @@ class lto::ThinBackendProc {
namespace {
class InProcessThinBackend : public ThinBackendProc {
- ThreadPool BackendThreadPool;
+ DefaultThreadPool BackendThreadPool;
AddStreamFn AddStream;
FileCache Cache;
std::set<GlobalValue::GUID> CfiFunctionDefs;
diff --git a/llvm/lib/LTO/LTOBackend.cpp b/llvm/lib/LTO/LTOBackend.cpp
index 6cfe67779b1a7de..71e8849dc3cc915 100644
--- a/llvm/lib/LTO/LTOBackend.cpp
+++ b/llvm/lib/LTO/LTOBackend.cpp
@@ -431,7 +431,7 @@ static void splitCodeGen(const Config &C, TargetMachine *TM,
AddStreamFn AddStream,
unsigned ParallelCodeGenParallelismLevel, Module &Mod,
const ModuleSummaryIndex &CombinedIndex) {
- ThreadPool CodegenThreadPool(
+ DefaultThreadPool CodegenThreadPool(
heavyweight_hardware_concurrency(ParallelCodeGenParallelismLevel));
unsigned ThreadCount = 0;
const Target *T = &TM->getTarget();
diff --git a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
index 8fd181846f0c4c1..8f517eb50dc76fc 100644
--- a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
+++ b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -980,7 +980,7 @@ void ThinLTOCodeGenerator::run() {
if (CodeGenOnly) {
// Perform only parallel codegen and return.
- ThreadPool Pool;
+ DefaultThreadPool Pool;
int count = 0;
for (auto &Mod : Modules) {
Pool.async([&](int count) {
@@ -1126,7 +1126,7 @@ void ThinLTOCodeGenerator::run() {
// Parallel optimizer + codegen
{
- ThreadPool Pool(heavyweight_hardware_concurrency(ThreadCount));
+ DefaultThreadPool Pool(heavyweight_hardware_concurrency(ThreadCount));
for (auto IndexCount : ModulesOrdering) {
auto &Mod = Modules[IndexCount];
Pool.async([&](int count) {
diff --git a/llvm/lib/Support/BalancedPartitioning.cpp b/llvm/lib/Support/BalancedPartitioning.cpp
index cb6ba61179941fd..f4254b50d26c914 100644
--- a/llvm/lib/Support/BalancedPartitioning.cpp
+++ b/llvm/lib/Support/BalancedPartitioning.cpp
@@ -82,7 +82,7 @@ void BalancedPartitioning::run(std::vector<BPFunctionNode> &Nodes) const {
Nodes.size(), Config.SplitDepth, Config.IterationsPerSplit));
std::optional<BPThreadPool> TP;
#if LLVM_ENABLE_THREADS
- ThreadPool TheThreadPool;
+ DefaultThreadPool TheThreadPool;
if (Config.TaskSplitDepth > 1)
TP.emplace(TheThreadPool);
#endif
diff --git a/llvm/lib/Support/ThreadPool.cpp b/llvm/lib/Support/ThreadPool.cpp
index 27e0f220ac4ed6f..9a26a2281c6cb16 100644
--- a/llvm/lib/Support/ThreadPool.cpp
+++ b/llvm/lib/Support/ThreadPool.cpp
@@ -20,7 +20,7 @@
using namespace llvm;
-ThreadPoolInterface::~ThreadPoolInterface() = default;
+ThreadPool::~ThreadPool() = default;
// A note on thread groups: Tasks are by default in no group (represented
// by nullptr ThreadPoolTaskGroup pointer in the Tasks queue) and functionality
diff --git a/llvm/tools/dsymutil/dsymutil.cpp b/llvm/tools/dsymutil/dsymutil.cpp
index b0e988c6f8e4b8c..25e281c415e75a9 100644
--- a/llvm/tools/dsymutil/dsymutil.cpp
+++ b/llvm/tools/dsymutil/dsymutil.cpp
@@ -734,7 +734,7 @@ int dsymutil_main(int argc, char **argv, const llvm::ToolContext &) {
S.ThreadsRequested = DebugMapPtrsOrErr->size();
S.Limit = true;
}
- ThreadPool Threads(S);
+ DefaultThreadPool Threads(S);
// If there is more than one link to execute, we need to generate
// temporary files.
diff --git a/llvm/tools/llvm-cov/CodeCoverage.cpp b/llvm/tools/llvm-cov/CodeCoverage.cpp
index 049e89d1a230034..1e5bfbe5c3aade5 100644
--- a/llvm/tools/llvm-cov/CodeCoverage.cpp
+++ b/llvm/tools/llvm-cov/CodeCoverage.cpp
@@ -1217,7 +1217,7 @@ int CodeCoverageTool::doShow(int argc, const char **argv,
ShowFilenames);
} else {
// In -output-dir mode, it's safe to use multiple threads to print files.
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
for (const std::string &SourceFile : SourceFiles)
Pool.async(&CodeCoverageTool::writeSourceFileView, this, SourceFile,
Coverage.get(), Printer.get(), ShowFilenames);
diff --git a/llvm/tools/llvm-cov/CoverageExporterJson.cpp b/llvm/tools/llvm-cov/CoverageExporterJson.cpp
index a424bbe06e0ecd2..9a8c7c94f06124d 100644
--- a/llvm/tools/llvm-cov/CoverageExporterJson.cpp
+++ b/llvm/tools/llvm-cov/CoverageExporterJson.cpp
@@ -277,7 +277,7 @@ json::Array renderFiles(const coverage::CoverageMapping &Coverage,
S = heavyweight_hardware_concurrency(SourceFiles.size());
S.Limit = true;
}
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
json::Array FileArray;
std::mutex FileArrayMutex;
diff --git a/llvm/tools/llvm-cov/CoverageReport.cpp b/llvm/tools/llvm-cov/CoverageReport.cpp
index 8cc073e4def8fc9..49a35f2a943e6fb 100644
--- a/llvm/tools/llvm-cov/CoverageReport.cpp
+++ b/llvm/tools/llvm-cov/CoverageReport.cpp
@@ -465,7 +465,7 @@ std::vector<FileCoverageSummary> CoverageReport::prepareFileReports(
S = heavyweight_hardware_concurrency(Files.size());
S.Limit = true;
}
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
std::vector<FileCoverageSummary> FileReports;
FileReports.reserve(Files.size());
@@ -580,7 +580,7 @@ Expected<FileCoverageSummary> DirectoryCoverageReport::prepareDirectoryReports(
PoolS = heavyweight_hardware_concurrency(Files.size());
PoolS.Limit = true;
}
- ThreadPool Pool(PoolS);
+ DefaultThreadPool Pool(PoolS);
TPool = &Pool;
LCPStack = {RootLCP};
diff --git a/llvm/tools/llvm-cov/CoverageReport.h b/llvm/tools/llvm-cov/CoverageReport.h
index b25ed5e35f9a77d..60f751ca9675287 100644
--- a/llvm/tools/llvm-cov/CoverageReport.h
+++ b/llvm/tools/llvm-cov/CoverageReport.h
@@ -20,7 +20,7 @@
namespace llvm {
-class ThreadPoolInterface;
+class ThreadPool;
/// Displays the code coverage report.
class CoverageReport {
@@ -104,7 +104,7 @@ class DirectoryCoverageReport {
/// For calling CoverageReport::prepareSingleFileReport asynchronously
/// in prepareSubDirectoryReports(). It's not intended to be modified by
/// generateSubDirectoryReport().
- ThreadPoolInterface *TPool;
+ ThreadPool *TPool;
/// One report level may correspond to multiple directory levels as we omit
/// directories which have only one subentry. So we use this Stack to track
diff --git a/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp b/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp
index 9d347dbd68f3956..44d656148a4e2cd 100644
--- a/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp
+++ b/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp
@@ -127,7 +127,7 @@ int llvm_debuginfod_main(int argc, char **argv, const llvm::ToolContext &) {
for (const std::string &Path : ScanPaths)
Paths.push_back(Path);
- ThreadPool Pool(hardware_concurrency(MaxConcurrency));
+ DefaultThreadPool Pool(hardware_concurrency(MaxConcurrency));
DebuginfodLog Log;
DebuginfodCollection Collection(Paths, Log, Pool, MinInterval);
DebuginfodServ...
[truncated]
Makes sense to me
I don't have really firm feelings/justification for this, but I'd have guessed that doing this as two separate (separated by a few days, maybe a week) renamings would be better for downstream consumers - they'd get a clear break without any ambiguity/name reuse. No strong feelings if other folks reckon doing it in one go is better/OK, though.
@dwblaikie: how would you split it? I didn't quite get the two renamings you have in mind?
One patch for each renaming. Changing both in one patch risks folks getting confusing error messages because their existing ThreadPool usage will now instantly start being interpreted as the interface type - resulting in different/confusing errors about inability to instantiate abstract types, etc., presumably. Rather than just that the name is no longer present at all. Ultimately they can root cause and figure out both renamings - probably not a huge deal either way, but my understanding was that separating them might be marginally better for downstreamers.
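For illustration, a sketch of the failure mode described above (hypothetical downstream code; the exact diagnostics vary by compiler):

    void downstreamCode() {
      // Written against the old naming:
      llvm::ThreadPool Pool(llvm::hardware_concurrency());
      // If both renames land in one commit, ThreadPool suddenly names the
      // abstract interface, so this line fails with something like
      //   "cannot declare variable 'Pool' to be of abstract type 'llvm::ThreadPool'"
      // instead of the clearer "no type named 'ThreadPool' in namespace 'llvm'"
      // that a plain ThreadPool -> DefaultThreadPool rename on its own would give.
    }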
Hear, hear. |
Gotcha, thanks for elaborating, somehow my brain was slow on a Monday. |
I did the first part of the renaming. @dwblaikie: looks good?
Hmm - this patch still seems to have both renamings in it, if I'm reading the PR correctly? I take it from the subject that isn't the intent, and that this patch only does the ThreadPool->DefaultThreadPool change?
Oh, maybe issues related to layered patches - this is intended to be submitted after the introduction of the ThreadPoolInterface? But it includes those changes in this review at the moment (I still haven't figured out what we're doing for dependent changes - and I thought the ThreadPoolInterface-introducing patch was already in - so not sure why it appears in this patch too).
Actually no: the first patch landed already, so I will push these ahead as an NFC change and rebase; that'll reduce the diff here.
Here is the extracted one: #84056. And the diff here is now clean (stacked on top of #84056). How does it look now, @dwblaikie?
The base class llvm::ThreadPoolInterface will be renamed llvm::ThreadPool in a subsequent commit.
@@ -104,7 +104,7 @@ static std::recursive_mutex *g_debugger_list_mutex_ptr =
     nullptr; // NOTE: intentional leak to avoid issues with C++ destructor chain
 static Debugger::DebuggerList *g_debugger_list_ptr =
     nullptr; // NOTE: intentional leak to avoid issues with C++ destructor chain
-static llvm::ThreadPool *g_thread_pool = nullptr;
+static llvm::DefaultThreadPoolThreadPool *g_thread_pool = nullptr;
Weirdly this issue wasn't caught in CI!
The base class llvm::ThreadPoolInterface will be renamed llvm::ThreadPool
in a subsequent commit.
This is a breaking change: clients who used to create a ThreadPool must now
create a DefaultThreadPool.