 20 |  20 |  #include <iterator>
 21 |  21 |  #include <functional>
 22 |  22 |  #include <vector>
 23 |     | -#include <pthread.h>
 24 |     | -
 25 |     | -#if !defined(_POSIX_BARRIERS) || _POSIX_BARRIERS < 0
 26 |     | -// Implement pthread_barrier_* for platforms that don't implement them (Darwin)
 27 |     | -
 28 |     | -#define PTHREAD_BARRIER_SERIAL_THREAD 1
 29 |     | -struct pthread_barrier_t {
 30 |     | -  pthread_mutex_t mutex;
 31 |     | -  pthread_cond_t cond;
 32 |     | -
 33 |     | -  unsigned count;
 34 |     | -  unsigned numThreadsWaiting;
 35 |     | -};
 36 |     | -typedef void *pthread_barrierattr_t;
 37 |     | -
 38 |     | -static int pthread_barrier_init(pthread_barrier_t *barrier,
 39 |     | -                                pthread_barrierattr_t*, unsigned count) {
 40 |     | -  if (count == 0) {
 41 |     | -    errno = EINVAL;
 42 |     | -    return -1;
 43 |     | -  }
 44 |     | -  if (pthread_mutex_init(&barrier->mutex, nullptr) != 0) {
 45 |     | -    return -1;
 46 |     | -  }
 47 |     | -  if (pthread_cond_init(&barrier->cond, nullptr) != 0) {
 48 |     | -    pthread_mutex_destroy(&barrier->mutex);
 49 |     | -    return -1;
 50 |     | -  }
 51 |     | -  barrier->count = count;
 52 |     | -  barrier->numThreadsWaiting = 0;
 53 |     | -  return 0;
 54 |     | -}
 55 |     | -
 56 |     | -static int pthread_barrier_destroy(pthread_barrier_t *barrier) {
 57 |     | -  // want to destroy both even if destroying one fails.
 58 |     | -  int ret = 0;
 59 |     | -  if (pthread_cond_destroy(&barrier->cond) != 0) {
 60 |     | -    ret = -1;
 61 |     | -  }
 62 |     | -  if (pthread_mutex_destroy(&barrier->mutex) != 0) {
 63 |     | -    ret = -1;
 64 |     | -  }
 65 |     | -  return ret;
 66 |     | -}
 67 |     | -
 68 |     | -static int pthread_barrier_wait(pthread_barrier_t *barrier) {
 69 |     | -  if (pthread_mutex_lock(&barrier->mutex) != 0) {
 70 |     | -    return -1;
 71 |     | -  }
 72 |     | -  ++barrier->numThreadsWaiting;
 73 |     | -  if (barrier->numThreadsWaiting < barrier->count) {
 74 |     | -    // Put the thread to sleep.
 75 |     | -    if (pthread_cond_wait(&barrier->cond, &barrier->mutex) != 0) {
 76 |     | -      return -1;
 77 |     | -    }
 78 |     | -    if (pthread_mutex_unlock(&barrier->mutex) != 0) {
 79 |     | -      return -1;
 80 |     | -    }
 81 |     | -    return 0;
 82 |     | -  } else {
 83 |     | -    // Reset thread count.
 84 |     | -    barrier->numThreadsWaiting = 0;
 85 |     | -
 86 |     | -    // Wake up all threads.
 87 |     | -    if (pthread_cond_broadcast(&barrier->cond) != 0) {
 88 |     | -      return -1;
 89 |     | -    }
 90 |     | -    if (pthread_mutex_unlock(&barrier->mutex) != 0) {
 91 |     | -      return -1;
 92 |     | -    }
 93 |     | -    return PTHREAD_BARRIER_SERIAL_THREAD;
 94 |     | -  }
 95 |     | -}
 96 |     | -#endif
    |  23 | +#include <thread>
    |  24 | +#include <condition_variable>
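Aside: the deleted Darwin fallback calls pthread_cond_wait exactly once, but POSIX allows condition waits to wake spuriously, so a spuriously woken thread could leave the barrier before everyone arrived. A minimal sketch of a spurious-wakeup-safe variant, with a generation counter so the barrier is also safely reusable (illustrative only, not part of this commit; names are made up):

#include <pthread.h>

struct Barrier {
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
  unsigned count;          // threads required to trip the barrier
  unsigned waiting = 0;    // threads currently blocked
  unsigned generation = 0; // bumped each time the barrier trips
  explicit Barrier(unsigned n) : count(n) {}
};

static int BarrierWait(Barrier *b) {
  pthread_mutex_lock(&b->mutex);
  unsigned gen = b->generation;
  if (++b->waiting == b->count) {
    // Last arrival: open a new generation and release everyone.
    b->waiting = 0;
    ++b->generation;
    pthread_cond_broadcast(&b->cond);
    pthread_mutex_unlock(&b->mutex);
    return 1; // mirrors PTHREAD_BARRIER_SERIAL_THREAD in the deleted shim
  }
  // A spurious wakeup re-enters the loop and goes back to sleep until
  // the generation actually changes.
  while (gen == b->generation)
    pthread_cond_wait(&b->cond, &b->mutex);
  pthread_mutex_unlock(&b->mutex);
  return 0;
}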
 97 |  25 |
 98 |  26 |  using namespace swift;
 99 |  27 |
100 |  28 |  // Race testing.
101 |  29 |
102 |  30 |  template <typename T>
103 |     | -struct RaceArgs {
    |  31 | +struct RaceThreadContext {
104 |  32 |    std::function<T()> code;
105 |     | -  pthread_barrier_t *go;
    |  33 | +  T result;
    |  34 | +
    |  35 | +  unsigned numThreads;
    |  36 | +  unsigned &numThreadsReady;
    |  37 | +  std::mutex &sharedMutex;
    |  38 | +  std::condition_variable &start_condition;
106 |  39 |  };
107 |  40 |
108 |     | -void *RaceThunk(void *vargs) {
109 |     | -  RaceArgs<void*> *args = static_cast<RaceArgs<void*> *>(vargs);
110 |     | -  // Signal ready. Wait for go.
111 |     | -  pthread_barrier_wait(args->go);
112 |     | -  return args->code();
    |  41 | +template <typename T>
    |  42 | +void RaceThunk(RaceThreadContext<T> &ctx) {
    |  43 | +  // update shared state
    |  44 | +  std::unique_lock<std::mutex> lk(ctx.sharedMutex);
    |  45 | +  ++ctx.numThreadsReady;
    |  46 | +  bool isLastThread = ctx.numThreadsReady == ctx.numThreads;
    |  47 | +
    |  48 | +  // wait until the rest of the thunks are ready
    |  49 | +  ctx.start_condition.wait(lk, [&ctx]{ // waiting releases the lock
    |  50 | +    return ctx.numThreadsReady == ctx.numThreads;
    |  51 | +  });
    |  52 | +  lk.unlock();
    |  53 | +
    |  54 | +  // The last thread will signal the condition_variable to kick off the rest
    |  55 | +  // of the waiting threads to start.
    |  56 | +  if (isLastThread) ctx.start_condition.notify_all();
    |  57 | +
    |  58 | +  ctx.result = ctx.code();
113 |  59 |  }
114 |  60 |
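The new RaceThunk replaces the barrier with a hand-rolled start gate: each thread checks in under sharedMutex, waits on start_condition until every thread has arrived, and the last arrival wakes the rest. (std::mutex and std::unique_lock formally live in <mutex>, which this hunk does not add; most standard libraries pull it in transitively through <condition_variable>.) A self-contained sketch of the same pattern, with made-up names, for reference:

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

int main() {
  const unsigned numThreads = 4;
  unsigned ready = 0;
  std::mutex m;
  std::condition_variable cv;

  std::vector<std::thread> threads;
  for (unsigned i = 0; i < numThreads; ++i) {
    threads.emplace_back([&] {
      std::unique_lock<std::mutex> lk(m);
      bool last = (++ready == numThreads);
      // wait() releases the lock while blocked and re-checks the
      // predicate under the lock on every wakeup, so spurious
      // wakeups are harmless here, unlike in the deleted shim.
      cv.wait(lk, [&] { return ready == numThreads; });
      lk.unlock();
      if (last) cv.notify_all();
      // ... racing work starts here, with all threads released together.
    });
  }
  for (auto &t : threads) t.join();
}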
|
115 |
| -/// RaceTest(code) runs code in many threads simultaneously, |
| 61 | +/// RaceTest(code) runs code in many threads simultaneously, |
116 | 62 | /// and returns a vector of all returned results.
|
117 |
| -template <typename T, int NumThreads = 64> |
118 |
| -std::vector<T> |
119 |
| -RaceTest(std::function<T()> code) |
120 |
| -{ |
121 |
| - const unsigned threadCount = NumThreads; |
122 |
| - |
123 |
| - pthread_barrier_t go; |
124 |
| - pthread_barrier_init(&go, nullptr, threadCount); |
125 |
| - |
126 |
| - // Create the threads. |
127 |
| - pthread_t threads[threadCount]; |
128 |
| - std::vector<RaceArgs<T>> args(threadCount, {code, &go}); |
129 |
| - |
130 |
| - for (unsigned i = 0; i < threadCount; i++) { |
131 |
| - pthread_create(&threads[i], nullptr, &RaceThunk, &args[i]); |
132 |
| - } |
| 63 | +template <typename T, unsigned NumThreads = 64> |
| 64 | +std::vector<T> RaceTest(std::function<T()> code) { |
| 65 | + unsigned numThreadsReady = 0; |
| 66 | + std::mutex sharedMutex; |
| 67 | + std::condition_variable start_condition; |
| 68 | + T result = NULL; |
| 69 | + |
| 70 | + // Create the contexts |
| 71 | + std::vector<RaceThreadContext<T>> contexts(NumThreads, { |
| 72 | + code, |
| 73 | + result, |
| 74 | + NumThreads, |
| 75 | + numThreadsReady, |
| 76 | + sharedMutex, |
| 77 | + start_condition}); |
| 78 | + |
| 79 | + // Create the threads |
| 80 | + std::vector<std::thread> threads; |
| 81 | + threads.reserve(NumThreads); |
| 82 | + for (unsigned i = 0; i < NumThreads; i++) |
| 83 | + threads.emplace_back(std::bind(RaceThunk<T>, std::ref(contexts[i]))); |
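The std::ref in the loop above matters: std::thread and std::bind decay-copy their arguments, so without it each thunk would run against a private copy of its context; here it would not even compile, since a copied value cannot bind to RaceThreadContext<T>&. A tiny illustration, separate from the commit:

#include <functional>
#include <thread>

static void bump(unsigned &n) { ++n; }

int main() {
  unsigned counter = 0;
  // std::thread t(bump, counter);  // would not compile: the decayed
  //                                // copy cannot bind to `unsigned &`
  std::thread t(bump, std::ref(counter)); // forwards a reference_wrapper,
                                          // same trick as RaceTest above
  t.join();                               // counter == 1
}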
133 |  84 |
134 |  85 |    // Collect results.
135 |  86 |    std::vector<T> results;
136 |     | -  for (unsigned i = 0; i < threadCount; i++) {
137 |     | -    void *result;
138 |     | -    pthread_join(threads[i], &result);
139 |     | -    results.push_back(static_cast<T>(result));
    |  87 | +  results.reserve(NumThreads);
    |  88 | +  for (unsigned i = 0; i < NumThreads; i++) {
    |  89 | +    threads[i].join();
    |  90 | +    results.emplace_back(contexts[i].result);
140 |  91 |    }
141 |     | -
142 |     | -  pthread_barrier_destroy(&go);
143 |     | -
144 |  92 |    return results;
145 |  93 |  }
146 |  94 |
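As the new signature shows, RaceTest defaults to 64 threads and collects one result per thread. Note that `T result = NULL` means T effectively has to be a pointer-like type, which matches how this file's metadata tests use it. A hypothetical caller (not from this file) looks like:

#include <vector>

static int sharedTarget = 42;

void exampleRace() {
  // Race the default 64 threads through the same read and keep what
  // each thread observed.
  std::vector<int *> observed = RaceTest<int *>([]() -> int * {
    return &sharedTarget;
  });
  // observed.size() == 64; in this toy case every element is &sharedTarget.
}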
|
147 |
| -/// RaceTest_ExpectEqual(code) runs code in many threads simultaneously, |
| 95 | +/// RaceTest_ExpectEqual(code) runs code in many threads simultaneously, |
148 | 96 | /// verifies that they all returned the same value, and returns that value.
|
149 | 97 | template<typename T>
|
150 | 98 | T RaceTest_ExpectEqual(std::function<T()> code)
|
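The body of RaceTest_ExpectEqual lies outside this diff, so the sketch below is not the file's actual code; it is one plausible implementation of the documented contract on top of RaceTest, with a distinct name to mark it as hypothetical:

#include <cassert>
#include <functional>
#include <vector>

template <typename T>
T RaceTest_ExpectEqual_Sketch(std::function<T()> code) {
  std::vector<T> results = RaceTest<T>(code);
  T first = results[0];
  for (const T &r : results)
    assert(r == first);  // every thread must have returned the same value
  return first;
}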
@@ -474,7 +422,7 @@ TEST(MetadataTest, getExistentialMetadata) {
474 | 422 |                mixedWitnessTable->getSuperclassConstraint());
475 | 423 |      return mixedWitnessTable;
476 | 424 |    });
477 |     | -
    | 425 | +
478 | 426 |    const ValueWitnessTable *ExpectedErrorValueWitnesses;
479 | 427 |  #if SWIFT_OBJC_INTEROP
480 | 428 |    ExpectedErrorValueWitnesses = &VALUE_WITNESS_SYM(BO);