@@ -32,11 +32,8 @@ namespace llvm {
32
32
33
33
class ThreadPoolTaskGroup;
34
34
35
- // / A ThreadPool for asynchronous parallel execution on a defined number of
36
- // / threads.
37
- // /
38
- // / The pool keeps a vector of threads alive, waiting on a condition variable
39
- // / for some work to become available.
35
+ /// This defines the abstract base interface for a ThreadPool allowing
36
+ /// asynchronous parallel execution on a defined number of threads.
40
37
///
41
38
/// It is possible to reuse one thread pool for different groups of tasks
42
39
/// by grouping tasks using ThreadPoolTaskGroup. All tasks are processed using
@@ -49,16 +46,31 @@ class ThreadPoolTaskGroup;
49
46
/// available threads are used up by tasks waiting for a task that has no thread
50
47
/// left to run on (this includes waiting on the returned future). It should be
51
48
/// generally safe to wait() for a group as long as groups do not form a cycle.
52
- class ThreadPool {
49
+ class ThreadPoolInterface {
50
+ // / The actual method to enqueue a task to be defined by the concrete
51
+ // / implementation.
52
+ virtual void asyncEnqueue (std::function<void ()> Task,
53
+ ThreadPoolTaskGroup *Group) = 0;
54
+
53
55
public:
54
- // / Construct a pool using the hardware strategy \p S for mapping hardware
55
- // / execution resources (threads, cores, CPUs)
56
- // / Defaults to using the maximum execution resources in the system, but
57
- // / accounting for the affinity mask.
58
- ThreadPool (ThreadPoolStrategy S = hardware_concurrency());
56
+ // / Destroying the pool will drain the pending tasks and wait. The current
57
+ // / thread may participate in the execution of the pending tasks.
58
+ virtual ~ThreadPoolInterface ();
59
59
60
- // / Blocking destructor: the pool will wait for all the threads to complete.
61
- ~ThreadPool ();
60
+ // / Blocking wait for all the threads to complete and the queue to be empty.
61
+ // / It is an error to try to add new tasks while blocking on this call.
62
+ // / Calling wait() from a task would deadlock waiting for itself.
63
+ virtual void wait () = 0;
64
+
65
+ // / Blocking wait for only all the threads in the given group to complete.
66
+ // / It is possible to wait even inside a task, but waiting (directly or
67
+ // / indirectly) on itself will deadlock. If called from a task running on a
68
+ // / worker thread, the call may process pending tasks while waiting in order
69
+ // / not to waste the thread.
70
+ virtual void wait (ThreadPoolTaskGroup &Group) = 0;
71
+
72
+ // / Returns the maximum number of worker this pool can eventually grow to.
73
+ virtual unsigned getMaxConcurrency () const = 0;
62
74
63
75
// / Asynchronous submission of a task to the pool. The returned future can be
64
76
// / used to wait for the task to finish and is *non-blocking* on destruction.
@@ -92,102 +104,85 @@ class ThreadPool {
92
104
&Group);
93
105
}
94
106
107
+ private:
108
+ // / Asynchronous submission of a task to the pool. The returned future can be
109
+ // / used to wait for the task to finish and is *non-blocking* on destruction.
110
+ template <typename ResTy>
111
+ std::shared_future<ResTy> asyncImpl (std::function<ResTy()> Task,
112
+ ThreadPoolTaskGroup *Group) {
113
+ auto Future = std::async (std::launch::deferred, std::move (Task)).share ();
114
+ asyncEnqueue ([Future]() { Future.wait (); }, Group);
115
+ return Future;
116
+ }
117
+ };
118
+
119
+ #if LLVM_ENABLE_THREADS
120
+ // / A ThreadPool implementation using std::threads.
121
+ // /
122
+ // / The pool keeps a vector of threads alive, waiting on a condition variable
123
+ // / for some work to become available.
124
+ class StdThreadPool : public ThreadPoolInterface {
125
+ public:
126
+ // / Construct a pool using the hardware strategy \p S for mapping hardware
127
+ // / execution resources (threads, cores, CPUs)
128
+ // / Defaults to using the maximum execution resources in the system, but
129
+ // / accounting for the affinity mask.
130
+ StdThreadPool (ThreadPoolStrategy S = hardware_concurrency());
131
+
132
+ // / Blocking destructor: the pool will wait for all the threads to complete.
133
+ ~StdThreadPool () override ;
134
+
95
135
// / Blocking wait for all the threads to complete and the queue to be empty.
96
136
// / It is an error to try to add new tasks while blocking on this call.
97
137
// / Calling wait() from a task would deadlock waiting for itself.
98
- void wait ();
138
+ void wait () override ;
99
139
100
140
// / Blocking wait for only all the threads in the given group to complete.
101
141
// / It is possible to wait even inside a task, but waiting (directly or
102
142
// / indirectly) on itself will deadlock. If called from a task running on a
103
143
// / worker thread, the call may process pending tasks while waiting in order
104
144
// / not to waste the thread.
105
- void wait (ThreadPoolTaskGroup &Group);
145
+ void wait (ThreadPoolTaskGroup &Group) override ;
106
146
107
- // Returns the maximum number of worker threads in the pool, not the current
108
- // number of threads!
109
- unsigned getMaxConcurrency () const { return MaxThreadCount; }
147
+ // / Returns the maximum number of worker threads in the pool, not the current
148
+ // / number of threads!
149
+ unsigned getMaxConcurrency () const override { return MaxThreadCount; }
110
150
111
- // TODO: misleading legacy name warning!
151
+ // TODO: Remove, misleading legacy name warning!
112
152
LLVM_DEPRECATED (" Use getMaxConcurrency instead" , " getMaxConcurrency" )
113
153
unsigned getThreadCount () const { return MaxThreadCount; }
114
154
115
155
// / Returns true if the current thread is a worker thread of this thread pool.
116
156
bool isWorkerThread () const ;
117
157
118
158
private:
119
- // / Helpers to create a promise and a callable wrapper of \p Task that sets
120
- // / the result of the promise. Returns the callable and a future to access the
121
- // / result.
122
- template <typename ResTy>
123
- static std::pair<std::function<void ()>, std::future<ResTy>>
124
- createTaskAndFuture (std::function<ResTy()> Task) {
125
- std::shared_ptr<std::promise<ResTy>> Promise =
126
- std::make_shared<std::promise<ResTy>>();
127
- auto F = Promise->get_future ();
128
- return {
129
- [Promise = std::move (Promise), Task]() { Promise->set_value (Task ()); },
130
- std::move (F)};
131
- }
132
- static std::pair<std::function<void ()>, std::future<void >>
133
- createTaskAndFuture (std::function<void ()> Task) {
134
- std::shared_ptr<std::promise<void >> Promise =
135
- std::make_shared<std::promise<void >>();
136
- auto F = Promise->get_future ();
137
- return {[Promise = std::move (Promise), Task]() {
138
- Task ();
139
- Promise->set_value ();
140
- },
141
- std::move (F)};
142
- }
143
-
144
159
// / Returns true if all tasks in the given group have finished (nullptr means
145
160
// / all tasks regardless of their group). QueueLock must be locked.
146
161
bool workCompletedUnlocked (ThreadPoolTaskGroup *Group) const ;
147
162
148
163
// / Asynchronous submission of a task to the pool. The returned future can be
149
164
// / used to wait for the task to finish and is *non-blocking* on destruction.
150
- template <typename ResTy>
151
- std::shared_future<ResTy> asyncImpl (std::function<ResTy()> Task,
152
- ThreadPoolTaskGroup *Group) {
153
-
154
- #if LLVM_ENABLE_THREADS
155
- // / Wrap the Task in a std::function<void()> that sets the result of the
156
- // / corresponding future.
157
- auto R = createTaskAndFuture (Task);
158
-
165
+ void asyncEnqueue (std::function<void ()> Task,
166
+ ThreadPoolTaskGroup *Group) override {
159
167
int requestedThreads;
160
168
{
161
169
// Lock the queue and push the new task
162
170
std::unique_lock<std::mutex> LockGuard (QueueLock);
163
171
164
172
// Don't allow enqueueing after disabling the pool
165
173
assert (EnableFlag && " Queuing a thread during ThreadPool destruction" );
166
- Tasks.emplace_back (std::make_pair (std::move (R. first ), Group));
174
+ Tasks.emplace_back (std::make_pair (std::move (Task ), Group));
167
175
requestedThreads = ActiveThreads + Tasks.size ();
168
176
}
169
177
QueueCondition.notify_one ();
170
178
grow (requestedThreads);
171
- return R.second .share ();
172
-
173
- #else // LLVM_ENABLE_THREADS Disabled
174
-
175
- // Get a Future with launch::deferred execution using std::async
176
- auto Future = std::async (std::launch::deferred, std::move (Task)).share ();
177
- // Wrap the future so that both ThreadPool::wait() can operate and the
178
- // returned future can be sync'ed on.
179
- Tasks.emplace_back (std::make_pair ([Future]() { Future.get (); }, Group));
180
- return Future;
181
- #endif
182
179
}
183
180
184
- #if LLVM_ENABLE_THREADS
185
- // Grow to ensure that we have at least `requested` Threads, but do not go
186
- // over MaxThreadCount.
181
+ // / Grow to ensure that we have at least `requested` Threads, but do not go
182
+ // / over MaxThreadCount.
187
183
void grow (int requested);
188
184
189
185
void processTasks (ThreadPoolTaskGroup *WaitingForGroup);
190
- #endif
191
186
192
187
// / Threads in flight
193
188
std::vector<llvm::thread> Threads;
@@ -209,25 +204,68 @@ class ThreadPool {
209
204
// / Number of threads active for tasks in the given group (only non-zero).
210
205
DenseMap<ThreadPoolTaskGroup *, unsigned > ActiveGroups;
211
206
212
- #if LLVM_ENABLE_THREADS // avoids warning for unused variable
213
207
// / Signal for the destruction of the pool, asking thread to exit.
214
208
bool EnableFlag = true ;
215
- #endif
216
209
217
210
const ThreadPoolStrategy Strategy;
218
211
219
212
// / Maximum number of threads to potentially grow this pool to.
220
213
const unsigned MaxThreadCount;
221
214
};
222
215
216
+ #endif // LLVM_ENABLE_THREADS Disabled
217
+
218
+ // / A non-threaded implementation.
219
+ class SingleThreadExecutor : public ThreadPoolInterface {
220
+ public:
221
+ // / Construct a non-threaded pool, ignoring using the hardware strategy.
222
+ SingleThreadExecutor (ThreadPoolStrategy ignored = {});
223
+
224
+ // / Blocking destructor: the pool will first execute the pending tasks.
225
+ ~SingleThreadExecutor () override ;
226
+
227
+ // / Blocking wait for all the tasks to execute first
228
+ void wait () override ;
229
+
230
+ // / Blocking wait for only all the tasks in the given group to complete.
231
+ void wait (ThreadPoolTaskGroup &Group) override ;
232
+
233
+ // / Returns always 1: there is no concurrency.
234
+ unsigned getMaxConcurrency () const override { return 1 ; }
235
+
236
+ // TODO: Remove, misleading legacy name warning!
237
+ LLVM_DEPRECATED (" Use getMaxConcurrency instead" , " getMaxConcurrency" )
238
+ unsigned getThreadCount () const { return 1 ; }
239
+
240
+ // / Returns true if the current thread is a worker thread of this thread pool.
241
+ bool isWorkerThread () const ;
242
+
243
+ private:
244
+ // / Asynchronous submission of a task to the pool. The returned future can be
245
+ // / used to wait for the task to finish and is *non-blocking* on destruction.
246
+ void asyncEnqueue (std::function<void ()> Task,
247
+ ThreadPoolTaskGroup *Group) override {
248
+ Tasks.emplace_back (std::make_pair (std::move (Task), Group));
249
+ }
250
+
251
+ // / Tasks waiting for execution in the pool.
252
+ std::deque<std::pair<std::function<void ()>, ThreadPoolTaskGroup *>> Tasks;
253
+ };
254
+
255
+ #if LLVM_ENABLE_THREADS
256
+ using ThreadPool = StdThreadPool;
257
+ #else
258
+ using ThreadPool = SingleThreadExecutor;
259
+ #endif
260
+
223
261
// / A group of tasks to be run on a thread pool. Thread pool tasks in different
224
262
// / groups can run on the same threadpool but can be waited for separately.
225
263
// / It is even possible for tasks of one group to submit and wait for tasks
226
264
// / of another group, as long as this does not form a loop.
227
265
class ThreadPoolTaskGroup {
228
266
public:
229
267
// / The ThreadPool argument is the thread pool to forward calls to.
230
- ThreadPoolTaskGroup (ThreadPool &Pool) : Pool(Pool) {}
268
+ ThreadPoolTaskGroup (ThreadPoolInterface &Pool) : Pool(Pool) {}
231
269
232
270
// / Blocking destructor: will wait for all the tasks in the group to complete
233
271
// / by calling ThreadPool::wait().
@@ -244,7 +282,7 @@ class ThreadPoolTaskGroup {
244
282
void wait () { Pool.wait (*this ); }
245
283
246
284
private:
247
- ThreadPool &Pool;
285
+ ThreadPoolInterface &Pool;
248
286
};
249
287
250
288
} // namespace llvm
0 commit comments