@@ -32,11 +32,8 @@ namespace llvm {

 class ThreadPoolTaskGroup;

-/// A ThreadPool for asynchronous parallel execution on a defined number of
-/// threads.
-///
-/// The pool keeps a vector of threads alive, waiting on a condition variable
-/// for some work to become available.
+/// This defines the abstract base interface for a ThreadPool allowing
+/// asynchronous parallel execution on a defined number of threads.
 ///
 /// It is possible to reuse one thread pool for different groups of tasks
 /// by grouping tasks using ThreadPoolTaskGroup. All tasks are processed using
@@ -49,16 +46,31 @@ class ThreadPoolTaskGroup;
 /// available threads are used up by tasks waiting for a task that has no thread
 /// left to run on (this includes waiting on the returned future). It should be
 /// generally safe to wait() for a group as long as groups do not form a cycle.
-class ThreadPool {
+class ThreadPoolInterface {
+  /// The actual method to enqueue a task to be defined by the concrete
+  /// implementation.
+  virtual void asyncEnqueue(std::function<void()> Task,
+                            ThreadPoolTaskGroup *Group) = 0;
+
 public:
-  /// Construct a pool using the hardware strategy \p S for mapping hardware
-  /// execution resources (threads, cores, CPUs)
-  /// Defaults to using the maximum execution resources in the system, but
-  /// accounting for the affinity mask.
-  ThreadPool(ThreadPoolStrategy S = hardware_concurrency());
+  /// Destroying the pool will drain the pending tasks and wait. The current
+  /// thread may participate in the execution of the pending tasks.
+  virtual ~ThreadPoolInterface();

-  /// Blocking destructor: the pool will wait for all the threads to complete.
-  ~ThreadPool();
+  /// Blocking wait for all the threads to complete and the queue to be empty.
+  /// It is an error to try to add new tasks while blocking on this call.
+  /// Calling wait() from a task would deadlock waiting for itself.
+  virtual void wait() = 0;
+
+  /// Blocking wait for only all the threads in the given group to complete.
+  /// It is possible to wait even inside a task, but waiting (directly or
+  /// indirectly) on itself will deadlock. If called from a task running on a
+  /// worker thread, the call may process pending tasks while waiting in order
+  /// not to waste the thread.
+  virtual void wait(ThreadPoolTaskGroup &Group) = 0;
+
+  /// Returns the maximum number of worker this pool can eventually grow to.
+  virtual unsigned getMaxConcurrency() const = 0;

   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
@@ -92,30 +104,32 @@ class ThreadPool {
                      &Group);
   }

-  /// Blocking wait for all the threads to complete and the queue to be empty.
-  /// It is an error to try to add new tasks while blocking on this call.
-  /// Calling wait() from a task would deadlock waiting for itself.
-  void wait();
+private:
+  /// Asynchronous submission of a task to the pool. The returned future can be
+  /// used to wait for the task to finish and is *non-blocking* on destruction.
+  template <typename ResTy>
+  std::shared_future<ResTy> asyncImpl(std::function<ResTy()> Task,
+                                      ThreadPoolTaskGroup *Group) {

-  /// Blocking wait for only all the threads in the given group to complete.
-  /// It is possible to wait even inside a task, but waiting (directly or
-  /// indirectly) on itself will deadlock. If called from a task running on a
-  /// worker thread, the call may process pending tasks while waiting in order
-  /// not to waste the thread.
-  void wait(ThreadPoolTaskGroup &Group);
+#if LLVM_ENABLE_THREADS
+    /// Wrap the Task in a std::function<void()> that sets the result of the
+    /// corresponding future.
+    auto R = createTaskAndFuture(Task);

-  // Returns the maximum number of worker threads in the pool, not the current
-  // number of threads!
-  unsigned getMaxConcurrency() const { return MaxThreadCount; }
+    asyncEnqueue(std::move(R.first), Group);
+    return R.second.share();

-  // TODO: misleading legacy name warning!
-  LLVM_DEPRECATED("Use getMaxConcurrency instead", "getMaxConcurrency")
-  unsigned getThreadCount() const { return MaxThreadCount; }
+#else // LLVM_ENABLE_THREADS Disabled

-  /// Returns true if the current thread is a worker thread of this thread pool.
-  bool isWorkerThread() const;
+    // Get a Future with launch::deferred execution using std::async
+    auto Future = std::async(std::launch::deferred, std::move(Task)).share();
+    // Wrap the future so that both ThreadPool::wait() can operate and the
+    // returned future can be sync'ed on.
+    asyncEnqueue([Future]() { Future.get(); }, Group);
+    return Future;
+#endif
+  }

-private:
   /// Helpers to create a promise and a callable wrapper of \p Task that sets
   /// the result of the promise. Returns the callable and a future to access the
   /// result.
@@ -140,50 +154,74 @@ class ThreadPool {
         },
         std::move(F)};
   }
+};
+
+/// A ThreadPool implementation using std::threads.
+///
+/// The pool keeps a vector of threads alive, waiting on a condition variable
+/// for some work to become available.
+class ThreadPool : public ThreadPoolInterface {
+public:
+  /// Construct a pool using the hardware strategy \p S for mapping hardware
+  /// execution resources (threads, cores, CPUs)
+  /// Defaults to using the maximum execution resources in the system, but
+  /// accounting for the affinity mask.
+  ThreadPool(ThreadPoolStrategy S = hardware_concurrency());
+
+  /// Blocking destructor: the pool will wait for all the threads to complete.
+  ~ThreadPool() override;
+
+  /// Blocking wait for all the threads to complete and the queue to be empty.
+  /// It is an error to try to add new tasks while blocking on this call.
+  /// Calling wait() from a task would deadlock waiting for itself.
+  void wait() override;
+
+  /// Blocking wait for only all the threads in the given group to complete.
+  /// It is possible to wait even inside a task, but waiting (directly or
+  /// indirectly) on itself will deadlock. If called from a task running on a
+  /// worker thread, the call may process pending tasks while waiting in order
+  /// not to waste the thread.
+  void wait(ThreadPoolTaskGroup &Group) override;

+  /// Returns the maximum number of worker threads in the pool, not the current
+  /// number of threads!
+  unsigned getMaxConcurrency() const override { return MaxThreadCount; }
+
+  // TODO: Remove, misleading legacy name warning!
+  LLVM_DEPRECATED("Use getMaxConcurrency instead", "getMaxConcurrency")
+  unsigned getThreadCount() const { return MaxThreadCount; }
+
+  /// Returns true if the current thread is a worker thread of this thread pool.
+  bool isWorkerThread() const;
+
+private:
   /// Returns true if all tasks in the given group have finished (nullptr means
   /// all tasks regardless of their group). QueueLock must be locked.
   bool workCompletedUnlocked(ThreadPoolTaskGroup *Group) const;

   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
-  template <typename ResTy>
-  std::shared_future<ResTy> asyncImpl(std::function<ResTy()> Task,
-                                      ThreadPoolTaskGroup *Group) {
-
+  void asyncEnqueue(std::function<void()> Task,
+                    ThreadPoolTaskGroup *Group) override {
 #if LLVM_ENABLE_THREADS
-    /// Wrap the Task in a std::function<void()> that sets the result of the
-    /// corresponding future.
-    auto R = createTaskAndFuture(Task);
-
     int requestedThreads;
     {
       // Lock the queue and push the new task
       std::unique_lock<std::mutex> LockGuard(QueueLock);

       // Don't allow enqueueing after disabling the pool
       assert(EnableFlag && "Queuing a thread during ThreadPool destruction");
-      Tasks.emplace_back(std::make_pair(std::move(R.first), Group));
+      Tasks.emplace_back(std::make_pair(std::move(Task), Group));
       requestedThreads = ActiveThreads + Tasks.size();
     }
     QueueCondition.notify_one();
     grow(requestedThreads);
-    return R.second.share();
-
-#else // LLVM_ENABLE_THREADS Disabled
-
-    // Get a Future with launch::deferred execution using std::async
-    auto Future = std::async(std::launch::deferred, std::move(Task)).share();
-    // Wrap the future so that both ThreadPool::wait() can operate and the
-    // returned future can be sync'ed on.
-    Tasks.emplace_back(std::make_pair([Future]() { Future.get(); }, Group));
-    return Future;
 #endif
   }

 #if LLVM_ENABLE_THREADS
-  // Grow to ensure that we have at least `requested` Threads, but do not go
-  // over MaxThreadCount.
+  /// Grow to ensure that we have at least `requested` Threads, but do not go
+  /// over MaxThreadCount.
   void grow(int requested);

   void processTasks(ThreadPoolTaskGroup *WaitingForGroup);
@@ -227,7 +265,7 @@ class ThreadPool {
 class ThreadPoolTaskGroup {
 public:
   /// The ThreadPool argument is the thread pool to forward calls to.
-  ThreadPoolTaskGroup(ThreadPool &Pool) : Pool(Pool) {}
+  ThreadPoolTaskGroup(ThreadPoolInterface &Pool) : Pool(Pool) {}

   /// Blocking destructor: will wait for all the tasks in the group to complete
   /// by calling ThreadPool::wait().
@@ -244,7 +282,7 @@ class ThreadPoolTaskGroup {
   void wait() { Pool.wait(*this); }

 private:
-  ThreadPool &Pool;
+  ThreadPoolInterface &Pool;
 };

} // namespace llvm
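
To make the shape of the refactoring concrete, here is a minimal usage sketch. It is not part of the patch: runBatch and the lambda bodies are hypothetical, and it assumes the templated async() overloads remain available on ThreadPoolInterface, as the unchanged context around the hunks above suggests.

#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/Threading.h"

using namespace llvm;

// Hypothetical helper: it depends only on the abstract ThreadPoolInterface,
// so any conforming pool implementation can be passed in.
static void runBatch(ThreadPoolInterface &Pool) {
  ThreadPoolTaskGroup Group(Pool);
  for (int I = 0; I < 8; ++I)
    Pool.async(Group, [I] { (void)I; /* per-item work */ });
  // Blocks until every task enqueued in this group has finished.
  Group.wait();
}

int main() {
  // Concrete std::thread-based pool, sized by the hardware strategy.
  ThreadPool Pool(hardware_concurrency());
  auto Future = Pool.async([] { return 42; });
  runBatch(Pool);
  int Result = Future.get(); // shared_future returned by async()
  Pool.wait();               // drain anything still pending
  return Result == 42 ? 0 : 1;
}

Code that only enqueues tasks and waits on them can now accept a ThreadPoolInterface& rather than the concrete ThreadPool, which is the point of introducing the abstract base class.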