@@ -26,6 +26,18 @@ enum class MemoryOrder : int {
   SEQ_CST = __ATOMIC_SEQ_CST
 };
 
+// These are a clang extension; see the clang documentation for more information:
+// https://clang.llvm.org/docs/LanguageExtensions.html#scoped-atomic-builtins.
+enum class MemoryScope : int {
+#if defined(__MEMORY_SCOPE_SYSTEM) && defined(__MEMORY_SCOPE_DEVICE)
+  SYSTEM = __MEMORY_SCOPE_SYSTEM,
+  DEVICE = __MEMORY_SCOPE_DEVICE,
+#else
+  SYSTEM = 0,
+  DEVICE = 0,
+#endif
+};
+
 template <typename T> struct Atomic {
   // For now, we will restrict to only arithmetic types.
   static_assert(is_arithmetic_v<T>, "Only arithmetic types can be atomic.");
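Note: the __scoped_atomic_* builtins and the __MEMORY_SCOPE_* macros are only available in recent clang, which is why the #else branch above keeps the enum well-formed on other compilers. Below is a minimal standalone sketch of the detect-and-fall-back pattern the rest of the patch applies to every operation; the function name device_load is illustrative and not part of the patch:

// Illustrative sketch only: probe for the clang scoped-atomic builtin and
// fall back to the plain GNU builtin when it is unavailable.
int device_load(int *ptr) {
#if __has_builtin(__scoped_atomic_load_n)
  // Ordering is only enforced within the "device" scope (e.g. a single GPU),
  // which can be cheaper than a system-wide atomic on GPU targets.
  return __scoped_atomic_load_n(ptr, __ATOMIC_SEQ_CST, __MEMORY_SCOPE_DEVICE);
#else
  return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#endif
}

Inside the class template the patch instead uses if constexpr (LIBC_HAS_BUILTIN(...)); assuming LIBC_HAS_BUILTIN expands to __has_builtin as its name suggests, the condition is a compile-time constant, and the discarded branch of the dependent function body is never instantiated, so the missing builtin is never referenced.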
@@ -54,48 +66,82 @@ template <typename T> struct Atomic {
   Atomic(const Atomic &) = delete;
   Atomic &operator=(const Atomic &) = delete;
 
-  // Atomic load
+  // Atomic load.
   operator T() { return __atomic_load_n(&val, int(MemoryOrder::SEQ_CST)); }
 
-  T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
-    return __atomic_load_n(&val, int(mem_ord));
+  T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_load_n))
+      return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
+    else
+      return __atomic_load_n(&val, int(mem_ord));
   }
 
-  // Atomic store
+  // Atomic store.
   T operator=(T rhs) {
     __atomic_store_n(&val, rhs, int(MemoryOrder::SEQ_CST));
     return rhs;
   }
 
-  void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
-    __atomic_store_n(&val, rhs, int(mem_ord));
+  void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_store_n))
+      __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
+    else
+      __atomic_store_n(&val, rhs, int(mem_ord));
   }
 
   // Atomic compare exchange
-  bool compare_exchange_strong(T &expected, T desired,
-                               MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
+  bool compare_exchange_strong(
+      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     return __atomic_compare_exchange_n(&val, &expected, desired, false,
                                        int(mem_ord), int(mem_ord));
   }
 
-  T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
-    return __atomic_exchange_n(&val, desired, int(mem_ord));
+  T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_exchange_n))
+      return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
+                                        (int)(mem_scope));
+    else
+      return __atomic_exchange_n(&val, desired, int(mem_ord));
   }
 
-  T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
-    return __atomic_fetch_add(&val, increment, int(mem_ord));
+  T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_fetch_add))
+      return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
+                                       (int)(mem_scope));
+    else
+      return __atomic_fetch_add(&val, increment, int(mem_ord));
   }
 
-  T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
-    return __atomic_fetch_or(&val, mask, int(mem_ord));
+  T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_fetch_or))
+      return __scoped_atomic_fetch_or(&val, mask, int(mem_ord),
+                                      (int)(mem_scope));
+    else
+      return __atomic_fetch_or(&val, mask, int(mem_ord));
   }
 
-  T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
-    return __atomic_fetch_and(&val, mask, int(mem_ord));
+  T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_fetch_and))
+      return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
+                                       (int)(mem_scope));
+    else
+      return __atomic_fetch_and(&val, mask, int(mem_ord));
   }
 
-  T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
-    return __atomic_fetch_sub(&val, decrement, int(mem_ord));
+  T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    if constexpr (LIBC_HAS_BUILTIN(__scoped_atomic_fetch_sub))
+      return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
+                                       (int)(mem_scope));
+    else
+      return __atomic_fetch_sub(&val, decrement, int(mem_ord));
   }
 
   // Set the value without using an atomic operation. This is useful
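For reference, a hypothetical caller of the new defaulted parameter. The include path, the cpp namespace, and the Atomic(T) value constructor are assumptions about the surrounding llvm-libc tree, not guaranteed by this diff; existing call sites compile unchanged because mem_scope is defaulted.

// Hypothetical usage sketch; the names noted above are assumptions.
#include "src/__support/CPP/atomic.h"

namespace cpp = LIBC_NAMESPACE::cpp;

cpp::Atomic<int> counter(0);

int bump() {
  // On compilers with the scoped builtins this lowers to a device-scoped
  // fetch-add; otherwise it falls back to __atomic_fetch_add.
  return counter.fetch_add(1, cpp::MemoryOrder::RELAXED,
                           cpp::MemoryScope::DEVICE);
}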