@@ -40,6 +40,28 @@ enum class MemoryScope : int {
 #endif
 };
 
+namespace impl {
+LIBC_INLINE constexpr int order(MemoryOrder mem_ord) {
+  return static_cast<int>(mem_ord);
+}
+
+LIBC_INLINE constexpr int scope(MemoryScope mem_scope) {
+  return static_cast<int>(mem_scope);
+}
+
+template <class T> LIBC_INLINE T *addressof(T &ref) {
+  return __builtin_addressof(ref);
+}
+
+LIBC_INLINE constexpr int infer_failure_order(MemoryOrder mem_ord) {
+  if (mem_ord == MemoryOrder::RELEASE)
+    return order(MemoryOrder::RELAXED);
+  if (mem_ord == MemoryOrder::ACQ_REL)
+    return order(MemoryOrder::ACQUIRE);
+  return order(mem_ord);
+}
+} // namespace impl
+
 template <typename T> struct Atomic {
   static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
                 is_move_constructible_v<T> && is_copy_assignable_v<T> &&
@@ -54,15 +76,6 @@ template <typename T> struct Atomic {
 
 private:
   // type conversion helper to avoid long c++ style casts
-  LIBC_INLINE static int order(MemoryOrder mem_ord) {
-    return static_cast<int>(mem_ord);
-  }
-
-  LIBC_INLINE static int scope(MemoryScope mem_scope) {
-    return static_cast<int>(mem_scope);
-  }
-
-  LIBC_INLINE static T *addressof(T &ref) { return __builtin_addressof(ref); }
 
   // Require types that are 1, 2, 4, 8, or 16 bytes in length to be aligned to
   // at least their size to be potentially used lock-free.
@@ -98,10 +111,11 @@ template <typename T> struct Atomic {
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     T res;
 #if __has_builtin(__scoped_atomic_load)
-    __scoped_atomic_load(addressof(val), addressof(res), order(mem_ord),
-                         scope(mem_scope));
+    __scoped_atomic_load(impl::addressof(val), impl::addressof(res),
+                         impl::order(mem_ord), impl::scope(mem_scope));
 #else
-    __atomic_load(addressof(val), addressof(res), order(mem_ord));
+    __atomic_load(impl::addressof(val), impl::addressof(res),
+                  impl::order(mem_ord));
 #endif
     return res;
   }
@@ -116,20 +130,22 @@ template <typename T> struct Atomic {
   store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
 #if __has_builtin(__scoped_atomic_store)
-    __scoped_atomic_store(addressof(val), addressof(rhs), order(mem_ord),
-                          scope(mem_scope));
+    __scoped_atomic_store(impl::addressof(val), impl::addressof(rhs),
+                          impl::order(mem_ord), impl::scope(mem_scope));
 #else
-    __atomic_store(addressof(val), addressof(rhs), order(mem_ord));
+    __atomic_store(impl::addressof(val), impl::addressof(rhs),
+                   impl::order(mem_ord));
 #endif
   }
 
   // Atomic compare exchange
   LIBC_INLINE bool compare_exchange_strong(
       T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange(addressof(val), addressof(expected),
-                                     addressof(desired), false, order(mem_ord),
-                                     order(mem_ord));
+    return __atomic_compare_exchange(
+        impl::addressof(val), impl::addressof(expected),
+        impl::addressof(desired), false, impl::order(mem_ord),
+        impl::infer_failure_order(mem_ord));
   }
 
   // Atomic compare exchange (separate success and failure memory orders)
@@ -138,17 +154,19 @@ template <typename T> struct Atomic {
       MemoryOrder failure_order,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     return __atomic_compare_exchange(
-        addressof(val), addressof(expected), addressof(desired), false,
-        order(success_order), order(failure_order));
+        impl::addressof(val), impl::addressof(expected),
+        impl::addressof(desired), false, impl::order(success_order),
+        impl::order(failure_order));
   }
 
   // Atomic compare exchange (weak version)
   LIBC_INLINE bool compare_exchange_weak(
       T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange(addressof(val), addressof(expected),
-                                     addressof(desired), true, order(mem_ord),
-                                     order(mem_ord));
+    return __atomic_compare_exchange(
+        impl::addressof(val), impl::addressof(expected),
+        impl::addressof(desired), true, impl::order(mem_ord),
+        impl::infer_failure_order(mem_ord));
   }
 
   // Atomic compare exchange (weak version with separate success and failure
@@ -158,20 +176,22 @@ template <typename T> struct Atomic {
       MemoryOrder failure_order,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     return __atomic_compare_exchange(
-        addressof(val), addressof(expected), addressof(desired), true,
-        order(success_order), order(failure_order));
+        impl::addressof(val), impl::addressof(expected),
+        impl::addressof(desired), true, impl::order(success_order),
+        impl::order(failure_order));
   }
 
   LIBC_INLINE T
   exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     T ret;
 #if __has_builtin(__scoped_atomic_exchange)
-    __scoped_atomic_exchange(addressof(val), addressof(desired), addressof(ret),
-                             order(mem_ord), scope(mem_scope));
+    __scoped_atomic_exchange(impl::addressof(val), impl::addressof(desired),
+                             impl::addressof(ret), impl::order(mem_ord),
+                             impl::scope(mem_scope));
 #else
-    __atomic_exchange(addressof(val), addressof(desired), addressof(ret),
-                      order(mem_ord));
+    __atomic_exchange(impl::addressof(val), impl::addressof(desired),
+                      impl::addressof(ret), impl::order(mem_ord));
 #endif
     return ret;
   }
@@ -181,10 +201,12 @@ template <typename T> struct Atomic {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_add)
-    return __scoped_atomic_fetch_add(addressof(val), increment, order(mem_ord),
-                                     scope(mem_scope));
+    return __scoped_atomic_fetch_add(impl::addressof(val), increment,
+                                     impl::order(mem_ord),
+                                     impl::scope(mem_scope));
 #else
-    return __atomic_fetch_add(addressof(val), increment, order(mem_ord));
+    return __atomic_fetch_add(impl::addressof(val), increment,
+                              impl::order(mem_ord));
 #endif
   }
 
@@ -193,10 +215,11 @@ template <typename T> struct Atomic {
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_or)
-    return __scoped_atomic_fetch_or(addressof(val), mask, order(mem_ord),
-                                    scope(mem_scope));
+    return __scoped_atomic_fetch_or(impl::addressof(val), mask,
+                                    impl::order(mem_ord),
+                                    impl::scope(mem_scope));
 #else
-    return __atomic_fetch_or(addressof(val), mask, order(mem_ord));
+    return __atomic_fetch_or(impl::addressof(val), mask, impl::order(mem_ord));
 #endif
   }
 
@@ -205,10 +228,11 @@ template <typename T> struct Atomic {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_and)
-    return __scoped_atomic_fetch_and(addressof(val), mask, order(mem_ord),
-                                     scope(mem_scope));
+    return __scoped_atomic_fetch_and(impl::addressof(val), mask,
+                                     impl::order(mem_ord),
+                                     impl::scope(mem_scope));
 #else
-    return __atomic_fetch_and(addressof(val), mask, order(mem_ord));
+    return __atomic_fetch_and(impl::addressof(val), mask, impl::order(mem_ord));
 #endif
   }
 
@@ -217,10 +241,12 @@ template <typename T> struct Atomic {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_sub)
-    return __scoped_atomic_fetch_sub(addressof(val), decrement, order(mem_ord),
-                                     scope(mem_scope));
+    return __scoped_atomic_fetch_sub(impl::addressof(val), decrement,
+                                     impl::order(mem_ord),
+                                     impl::scope(mem_scope));
 #else
-    return __atomic_fetch_sub(addressof(val), decrement, order(mem_ord));
+    return __atomic_fetch_sub(impl::addressof(val), decrement,
+                              impl::order(mem_ord));
 #endif
   }
 
@@ -244,14 +270,6 @@ template <typename T> struct AtomicRef {
 private:
   T *ptr;
 
-  LIBC_INLINE static int order(MemoryOrder mem_ord) {
-    return static_cast<int>(mem_ord);
-  }
-
-  LIBC_INLINE static int scope(MemoryScope mem_scope) {
-    return static_cast<int>(mem_scope);
-  }
-
 public:
   // Constructor from T reference
   LIBC_INLINE explicit constexpr AtomicRef(T &obj) : ptr(&obj) {}
@@ -270,9 +288,10 @@ template <typename T> struct AtomicRef {
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
     T res;
 #if __has_builtin(__scoped_atomic_load)
-    __scoped_atomic_load(ptr, &res, order(mem_ord), scope(mem_scope));
+    __scoped_atomic_load(ptr, &res, impl::order(mem_ord),
+                         impl::scope(mem_scope));
 #else
-    __atomic_load(ptr, &res, order(mem_ord));
+    __atomic_load(ptr, &res, impl::order(mem_ord));
 #endif
     return res;
   }
@@ -287,9 +306,10 @@ template <typename T> struct AtomicRef {
   store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
 #if __has_builtin(__scoped_atomic_store)
-    __scoped_atomic_store(ptr, &rhs, order(mem_ord), scope(mem_scope));
+    __scoped_atomic_store(ptr, &rhs, impl::order(mem_ord),
+                          impl::scope(mem_scope));
 #else
-    __atomic_store(ptr, &rhs, order(mem_ord));
+    __atomic_store(ptr, &rhs, impl::order(mem_ord));
 #endif
   }
 
@@ -298,7 +318,8 @@ template <typename T> struct AtomicRef {
       T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
     return __atomic_compare_exchange(ptr, &expected, &desired, false,
-                                     order(mem_ord), order(mem_ord));
+                                     impl::order(mem_ord),
+                                     impl::infer_failure_order(mem_ord));
   }
 
   // Atomic compare exchange (strong, separate success/failure memory orders)
@@ -307,8 +328,8 @@ template <typename T> struct AtomicRef {
       MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
     return __atomic_compare_exchange(ptr, &expected, &desired, false,
-                                     order(success_order),
-                                     order(failure_order));
+                                     impl::order(success_order),
+                                     impl::order(failure_order));
   }
 
   // Atomic exchange
@@ -317,10 +338,10 @@ template <typename T> struct AtomicRef {
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
     T ret;
 #if __has_builtin(__scoped_atomic_exchange)
-    __scoped_atomic_exchange(ptr, &desired, &ret, order(mem_ord),
-                             scope(mem_scope));
+    __scoped_atomic_exchange(ptr, &desired, &ret, impl::order(mem_ord),
+                             impl::scope(mem_scope));
 #else
-    __atomic_exchange(ptr, &desired, &ret, order(mem_ord));
+    __atomic_exchange(ptr, &desired, &ret, impl::order(mem_ord));
 #endif
     return ret;
   }
@@ -330,10 +351,10 @@ template <typename T> struct AtomicRef {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_add)
-    return __scoped_atomic_fetch_add(ptr, increment, order(mem_ord),
-                                     scope(mem_scope));
+    return __scoped_atomic_fetch_add(ptr, increment, impl::order(mem_ord),
+                                     impl::scope(mem_scope));
 #else
-    return __atomic_fetch_add(ptr, increment, order(mem_ord));
+    return __atomic_fetch_add(ptr, increment, impl::order(mem_ord));
 #endif
   }
 
@@ -342,10 +363,10 @@ template <typename T> struct AtomicRef {
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_or)
-    return __scoped_atomic_fetch_or(ptr, mask, order(mem_ord),
-                                    scope(mem_scope));
+    return __scoped_atomic_fetch_or(ptr, mask, impl::order(mem_ord),
+                                    impl::scope(mem_scope));
 #else
-    return __atomic_fetch_or(ptr, mask, order(mem_ord));
+    return __atomic_fetch_or(ptr, mask, impl::order(mem_ord));
 #endif
   }
 
@@ -354,10 +375,10 @@ template <typename T> struct AtomicRef {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_and)
-    return __scoped_atomic_fetch_and(ptr, mask, order(mem_ord),
-                                     scope(mem_scope));
+    return __scoped_atomic_fetch_and(ptr, mask, impl::order(mem_ord),
+                                     impl::scope(mem_scope));
 #else
-    return __atomic_fetch_and(ptr, mask, order(mem_ord));
+    return __atomic_fetch_and(ptr, mask, impl::order(mem_ord));
 #endif
   }
 
@@ -366,10 +387,10 @@ template <typename T> struct AtomicRef {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_sub)
-    return __scoped_atomic_fetch_sub(ptr, decrement, order(mem_ord),
-                                     scope(mem_scope));
+    return __scoped_atomic_fetch_sub(ptr, decrement, impl::order(mem_ord),
+                                     impl::scope(mem_scope));
 #else
-    return __atomic_fetch_sub(ptr, decrement, order(mem_ord));
+    return __atomic_fetch_sub(ptr, decrement, impl::order(mem_ord));
 #endif
   }
 };
@@ -402,7 +423,6 @@ LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
   asm volatile("" ::: "memory");
 #endif
 }
-
 } // namespace cpp
 } // namespace LIBC_NAMESPACE_DECL
 
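A note on why the compare-exchange calls now pass impl::infer_failure_order(mem_ord) instead of reusing order(mem_ord) for both orderings: the __atomic_compare_exchange builtin, like the C++ memory model, does not accept RELEASE or ACQ_REL as the failure ordering, so when the caller supplies only a single order it has to be weakened for the failure path. Below is a minimal standalone sketch of that mapping, compilable with Clang or GCC; the local MemoryOrder enum and its mapping onto the compiler's __ATOMIC_* constants are illustrative assumptions, not the libc header itself.

// Standalone sketch (not part of the patch): illustrates the
// RELEASE -> RELAXED and ACQ_REL -> ACQUIRE downgrade performed by
// impl::infer_failure_order before calling the compare-exchange builtin.
#include <cassert>

enum class MemoryOrder : int {
  RELAXED = __ATOMIC_RELAXED,
  ACQUIRE = __ATOMIC_ACQUIRE,
  RELEASE = __ATOMIC_RELEASE,
  ACQ_REL = __ATOMIC_ACQ_REL,
  SEQ_CST = __ATOMIC_SEQ_CST
};

constexpr int order(MemoryOrder mem_ord) { return static_cast<int>(mem_ord); }

// The failure ordering of a compare-exchange may not be RELEASE or ACQ_REL,
// so those two are weakened; every other order passes through unchanged.
constexpr int infer_failure_order(MemoryOrder mem_ord) {
  if (mem_ord == MemoryOrder::RELEASE)
    return order(MemoryOrder::RELAXED);
  if (mem_ord == MemoryOrder::ACQ_REL)
    return order(MemoryOrder::ACQUIRE);
  return order(mem_ord);
}

int main() {
  int val = 0;
  int expected = 0;
  int desired = 1;
  // With a single ACQ_REL order, the failure order is inferred as ACQUIRE,
  // which keeps the builtin call well-formed.
  bool ok = __atomic_compare_exchange(&val, &expected, &desired, /*weak=*/false,
                                      order(MemoryOrder::ACQ_REL),
                                      infer_failure_order(MemoryOrder::ACQ_REL));
  assert(ok && val == 1);
  return 0;
}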