Skip to content

Commit a66435c (2 parents: 6cf40d3 + 136a40a)

Authored by DougGregor — Merge pull request #39288 from DougGregor/back-deploy-width-limited-queues:
Use private concurrent queues for back-deployed concurrency.

File tree

1 file changed: +31 additions, −1 deletion

stdlib/public/Concurrency/GlobalExecutor.cpp

Lines changed: 31 additions & 1 deletion

@@ -269,16 +269,45 @@ static constexpr size_t globalQueueCacheCount =
     static_cast<size_t>(JobPriority::UserInteractive) + 1;
 static std::atomic<dispatch_queue_t> globalQueueCache[globalQueueCacheCount];
 
+#ifdef SWIFT_CONCURRENCY_BACK_DEPLOYMENT
+extern "C" void dispatch_queue_set_width(dispatch_queue_t dq, long width);
+#endif
+
 static dispatch_queue_t getGlobalQueue(JobPriority priority) {
   size_t numericPriority = static_cast<size_t>(priority);
   if (numericPriority >= globalQueueCacheCount)
     swift_Concurrency_fatalError(0, "invalid job priority %#zx");
 
+#ifdef SWIFT_CONCURRENCY_BACK_DEPLOYMENT
+  std::memory_order loadOrder = std::memory_order_acquire;
+#else
+  std::memory_order loadOrder = std::memory_order_relaxed;
+#endif
+
   auto *ptr = &globalQueueCache[numericPriority];
-  auto queue = ptr->load(std::memory_order_relaxed);
+  auto queue = ptr->load(loadOrder);
   if (SWIFT_LIKELY(queue))
     return queue;
 
+#ifdef SWIFT_CONCURRENCY_BACK_DEPLOYMENT
+  const int DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS = -3;
+
+  // Create a new cooperative concurrent queue and swap it in.
+  dispatch_queue_attr_t newQueueAttr = dispatch_queue_attr_make_with_qos_class(
+      DISPATCH_QUEUE_CONCURRENT, (dispatch_qos_class_t)priority, 0);
+  dispatch_queue_t newQueue = dispatch_queue_create(
+      "Swift global concurrent queue", newQueueAttr);
+  dispatch_queue_set_width(newQueue, DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS);
+
+  if (!ptr->compare_exchange_strong(queue, newQueue,
+                                    /*success*/ std::memory_order_release,
+                                    /*failure*/ std::memory_order_acquire)) {
+    dispatch_release(newQueue);
+    return queue;
+  }
+
+  return newQueue;
+#else
   // If we don't have a queue cached for this priority, cache it now. This may
   // race with other threads doing this at the same time for this priority, but
   // that's OK, they'll all end up writing the same value.
@@ -288,6 +317,7 @@ static dispatch_queue_t getGlobalQueue(JobPriority priority) {
   // Unconditionally store it back in the cache. If we raced with another
   // thread, we'll just overwrite the entry with the same value.
   ptr->store(queue, std::memory_order_relaxed);
+#endif
 
   return queue;
 }

0 commit comments

Comments (0)