@@ -40,15 +40,27 @@ enum class MemoryScope : int {
 };
 
 template <typename T> struct Atomic {
-  // For now, we will restrict to only arithmetic types.
-  static_assert(is_arithmetic_v<T>, "Only arithmetic types can be atomic.");
+  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
+                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
+                    is_move_assignable_v<T>,
+                "atomic<T> requires T to be trivially copyable, copy "
+                "constructible, move constructible, copy assignable, "
+                "and move assignable.");
 
 private:
   // The value stored should be appropriately aligned so that
   // hardware instructions used to perform atomic operations work
   // correctly.
   static constexpr int ALIGNMENT = sizeof(T) > alignof(T) ? sizeof(T)
                                                           : alignof(T);
+  // type conversion helper to avoid long c++ style casts
+  LIBC_INLINE static int order(MemoryOrder mem_ord) {
+    return static_cast<int>(mem_ord);
+  }
+
+  LIBC_INLINE static int scope(MemoryScope mem_scope) {
+    return static_cast<int>(mem_scope);
+  }
 
 public:
   using value_type = T;
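
The first hunk replaces the arithmetic-only restriction with the set of requirements that std::atomic places on its value type: trivially copyable plus copy/move constructible and assignable. Below is a minimal standalone, compile-only sketch of the same trait check, written against the standard <type_traits> instead of llvm-libc's internal cpp:: traits; the Counter and Pinned types and the atomic_compatible_v helper are illustrative, not part of the patch.

#include <type_traits>

// Hypothetical trivially copyable aggregate: accepted by the relaxed check.
struct Counter {
  int hits;
  int misses;
};

// Hypothetical type with user-provided copy operations: not trivially
// copyable, so it is still rejected.
struct Pinned {
  Pinned() = default;
  Pinned(const Pinned &) {}
  Pinned &operator=(const Pinned &) { return *this; }
};

template <typename T>
inline constexpr bool atomic_compatible_v =
    std::is_trivially_copyable_v<T> && std::is_copy_constructible_v<T> &&
    std::is_move_constructible_v<T> && std::is_copy_assignable_v<T> &&
    std::is_move_assignable_v<T>;

static_assert(atomic_compatible_v<int>);     // arithmetic types still qualify
static_assert(atomic_compatible_v<Counter>); // plain structs now qualify too
static_assert(!atomic_compatible_v<Pinned>); // non-trivially-copyable: rejected

The checks run entirely at compile time (C++17), so the snippet has no runtime behavior.
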
@@ -59,131 +71,146 @@ template <typename T> struct Atomic {
   // operations should be performed using the atomic methods however.
   alignas(ALIGNMENT) value_type val;
 
-  constexpr Atomic() = default;
+  LIBC_INLINE constexpr Atomic() = default;
 
   // Intializes the value without using atomic operations.
-  constexpr Atomic(value_type v) : val(v) {}
+  LIBC_INLINE constexpr Atomic(value_type v) : val(v) {}
 
-  Atomic(const Atomic &) = delete;
-  Atomic &operator=(const Atomic &) = delete;
+  LIBC_INLINE Atomic(const Atomic &) = delete;
+  LIBC_INLINE Atomic &operator=(const Atomic &) = delete;
 
   // Atomic load.
-  operator T() { return __atomic_load_n(&val, int(MemoryOrder::SEQ_CST)); }
-
-  T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_load_n)
-    return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
+  LIBC_INLINE operator T() { return load(); }
+
+  LIBC_INLINE T
+  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    T res;
+#if __has_builtin(__scoped_atomic_load)
+    __scoped_atomic_load(&val, &res, order(mem_ord), scope(mem_scope));
 #else
-    return __atomic_load_n(&val, int(mem_ord));
+    __atomic_load(&val, &res, order(mem_ord));
 #endif
+    return res;
   }
 
   // Atomic store.
-  T operator=(T rhs) {
-    __atomic_store_n(&val, rhs, int(MemoryOrder::SEQ_CST));
+  LIBC_INLINE T operator=(T rhs) {
+    store(rhs);
     return rhs;
   }
 
-  void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_store_n)
-    __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
+  LIBC_INLINE void
+  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+#if __has_builtin(__scoped_atomic_store)
+    __scoped_atomic_store(&val, &rhs, order(mem_ord), scope(mem_scope));
 #else
-    __atomic_store_n(&val, rhs, int(mem_ord));
+    __atomic_store(&val, &rhs, order(mem_ord));
 #endif
   }
 
   // Atomic compare exchange
-  bool compare_exchange_strong(
+  LIBC_INLINE bool compare_exchange_strong(
       T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange_n(&val, &expected, desired, false,
-                                       int(mem_ord), int(mem_ord));
+    return __atomic_compare_exchange(&val, &expected, &desired, false,
+                                     order(mem_ord), order(mem_ord));
   }
 
   // Atomic compare exchange (separate success and failure memory orders)
-  bool compare_exchange_strong(
+  LIBC_INLINE bool compare_exchange_strong(
       T &expected, T desired, MemoryOrder success_order,
       MemoryOrder failure_order,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange_n(&val, &expected, desired, false,
-                                       static_cast<int>(success_order),
-                                       static_cast<int>(failure_order));
+    return __atomic_compare_exchange(&val, &expected, &desired, false,
+                                     order(success_order),
+                                     order(failure_order));
   }
 
   // Atomic compare exchange (weak version)
-  bool compare_exchange_weak(
+  LIBC_INLINE bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange_n(&val, &expected, desired, true,
-                                       static_cast<int>(mem_ord),
-                                       static_cast<int>(mem_ord));
+    return __atomic_compare_exchange(&val, &expected, &desired, true,
+                                     order(mem_ord), order(mem_ord));
  }
 
   // Atomic compare exchange (weak version with separate success and failure
   // memory orders)
-  bool compare_exchange_weak(
+  LIBC_INLINE bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange_n(&val, &expected, desired, true,
-                                       static_cast<int>(success_order),
-                                       static_cast<int>(failure_order));
+    return __atomic_compare_exchange(&val, &expected, &desired, true,
+                                     order(success_order),
+                                     order(failure_order));
  }
 
-  T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_exchange_n)
-    return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
-                                      (int)(mem_scope));
+  LIBC_INLINE T
+  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    T ret;
+#if __has_builtin(__scoped_atomic_exchange)
+    __scoped_atomic_exchange(&val, &desired, &ret, order(mem_ord),
+                             scope(mem_scope));
 #else
-    return __atomic_exchange_n(&val, desired, int(mem_ord));
+    __atomic_exchange(&val, &desired, &ret, order(mem_ord));
 #endif
+    return ret;
   }
 
-  T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+  LIBC_INLINE T
+  fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_add)
-    return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
-                                     (int)(mem_scope));
+    return __scoped_atomic_fetch_add(&val, increment, order(mem_ord),
+                                     scope(mem_scope));
 #else
-    return __atomic_fetch_add(&val, increment, int(mem_ord));
+    return __atomic_fetch_add(&val, increment, order(mem_ord));
 #endif
   }
 
-  T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+  LIBC_INLINE T
+  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_or)
-    return __scoped_atomic_fetch_or(&val, mask, int(mem_ord), (int)(mem_scope));
+    return __scoped_atomic_fetch_or(&val, mask, order(mem_ord),
+                                    scope(mem_scope));
 #else
-    return __atomic_fetch_or(&val, mask, int(mem_ord));
+    return __atomic_fetch_or(&val, mask, order(mem_ord));
 #endif
   }
 
-  T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+  LIBC_INLINE T
+  fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_and)
-    return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
-                                     (int)(mem_scope));
+    return __scoped_atomic_fetch_and(&val, mask, order(mem_ord),
+                                     scope(mem_scope));
 #else
-    return __atomic_fetch_and(&val, mask, int(mem_ord));
+    return __atomic_fetch_and(&val, mask, order(mem_ord));
 #endif
   }
 
-  T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+  LIBC_INLINE T
+  fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_sub)
-    return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
-                                     (int)(mem_scope));
+    return __scoped_atomic_fetch_sub(&val, decrement, order(mem_ord),
+                                     scope(mem_scope));
 #else
-    return __atomic_fetch_sub(&val, decrement, int(mem_ord));
+    return __atomic_fetch_sub(&val, decrement, order(mem_ord));
 #endif
   }
 
   // Set the value without using an atomic operation. This is useful
   // in initializing atomic values without a constructor.
-  void set(T rhs) { val = rhs; }
+  LIBC_INLINE void set(T rhs) { val = rhs; }
 };
 
 // Issue a thread fence with the given memory ordering.
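
The other recurring change is the move from the __atomic_*_n / __scoped_atomic_*_n builtins to their generic counterparts (__atomic_load, __atomic_store, __atomic_exchange, __atomic_compare_exchange), which pass the value through pointers instead of by value and therefore work for any trivially copyable T, not just integers and pointers. Below is a standalone sketch that exercises those generic GCC/Clang builtins directly rather than through the cpp::Atomic wrapper; the Point struct and bump_x function are illustrative names, and on some targets an 8-byte struct may require linking against libatomic.

#include <cstdio>

// Trivially copyable, but not an integer or pointer, so the *_n builtins
// cannot operate on it; the generic builtins can.
struct Point {
  int x;
  int y;
};

Point shared = {0, 0};

// Atomically increment shared.x using a compare-exchange loop over the
// whole struct.
void bump_x() {
  Point expected, desired;
  __atomic_load(&shared, &expected, __ATOMIC_SEQ_CST);
  do {
    desired = expected;
    ++desired.x;
    // On failure, `expected` is refreshed with the current value of `shared`.
  } while (!__atomic_compare_exchange(&shared, &expected, &desired,
                                      /*weak=*/true, __ATOMIC_SEQ_CST,
                                      __ATOMIC_SEQ_CST));
}

int main() {
  bump_x();
  Point snapshot;
  __atomic_load(&shared, &snapshot, __ATOMIC_SEQ_CST);
  std::printf("x = %d, y = %d\n", snapshot.x, snapshot.y); // prints x = 1, y = 0
  return 0;
}
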