@@ -122,9 +122,12 @@ AtomicCompareExchange(multi_ptr<T, AddressSpace> MPtr,
   auto SPIRVSuccess = getMemorySemanticsMask(Success);
   auto SPIRVFailure = getMemorySemanticsMask(Failure);
   auto SPIRVScope = getScope(Scope);
-  auto *Ptr = MPtr.get();
-  return __spirv_AtomicCompareExchange(Ptr, SPIRVScope, SPIRVSuccess,
-                                       SPIRVFailure, Desired, Expected);
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
+
+  return (T)__spirv_AtomicCompareExchange(
+      Ptr, SPIRVScope, SPIRVSuccess, SPIRVFailure, (I)Desired, (I)Expected);
 }
 
 template <typename T, access::address_space AddressSpace>
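Every hunk in this patch follows the same pattern: the `multi_ptr` is reinterpreted as a pointer to `detail::atomic_integer_arg_t<T>`, and arguments and results are cast across that type, so the `__spirv_Atomic*` built-ins only ever see the integer types they are actually declared for. The trait itself is not part of this diff; below is a minimal sketch of what such a mapping could look like, assuming the built-ins are declared only for 32- and 64-bit integers (the names and that width assumption are mine, not the patch's):

```cpp
#include <cstdint>
#include <type_traits>

// Hypothetical sketch, not the definition from the patch: map any integral T
// onto a same-width integer type that the __spirv_Atomic* built-ins accept.
namespace detail {
template <typename T> struct atomic_integer_arg {
  static_assert(std::is_integral<T>::value, "T must be an integral type");
  // Assumption: only 32- and 64-bit built-ins exist, so same-width integral
  // types collapse onto int32_t / int64_t and narrower types are rejected.
  static_assert(sizeof(T) == 4 || sizeof(T) == 8,
                "no __spirv_Atomic* overload for this width");
  using type =
      typename std::conditional<sizeof(T) == 8, int64_t, int32_t>::type;
};
template <typename T>
using atomic_integer_arg_t = typename atomic_integer_arg<T>::type;
} // namespace detail
```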
@@ -150,10 +153,12 @@ template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicLoad(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
            intel::memory_order Order) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicLoad(Ptr, SPIRVScope, SPIRVOrder);
+  return (T)__spirv_AtomicLoad(Ptr, SPIRVScope, SPIRVOrder);
 }
 
 template <typename T, access::address_space AddressSpace>
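A usage sketch of the updated `AtomicLoad`, assuming the caller sits where these detail helpers, `multi_ptr`, and the `intel::` extension enums are visible; the caller itself, including its name, is illustrative and not part of the patch:

```cpp
// Illustrative caller, not from the patch. Assumes the enclosing namespace
// makes AtomicLoad, multi_ptr, and the intel:: extension enums visible.
template <access::address_space AS = access::address_space::global_space>
unsigned int LoadCounter(multi_ptr<unsigned int, AS> Counter) {
  // Internally the wrapper reinterprets Counter's pointer to
  // atomic_integer_arg_t<unsigned int> and casts the loaded value back,
  // so the caller sees a plain unsigned int.
  return AtomicLoad(Counter, intel::memory_scope::device,
                    intel::memory_order::relaxed);
}
```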
@@ -174,10 +179,12 @@ template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value>
 AtomicStore(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
             intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  __spirv_AtomicStore(Ptr, SPIRVScope, SPIRVOrder, Value);
+  __spirv_AtomicStore(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
 
 template <typename T, access::address_space AddressSpace>
@@ -198,10 +205,12 @@ template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicExchange(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
                intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicExchange(Ptr, SPIRVScope, SPIRVOrder, Value);
+  return (T)__spirv_AtomicExchange(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
 
 template <typename T, access::address_space AddressSpace>
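The `(I)Value` / `(T)result` pair in each wrapper is a same-width integer round-trip. A standalone host-side illustration of why that round-trip is value-preserving on two's-complement targets follows; this snippet is mine, not the patch's:

```cpp
#include <cassert>
#include <cstdint>

// Casting an unsigned value to a same-width signed type and back is
// value-preserving under the (pre-C++20 implementation-defined, but
// universal) two's-complement conversion. This is what makes the
// (I)Value / (T)result round-trip in the wrappers safe.
int main() {
  unsigned int Value = 0xDEADBEEFu; // plays the role of T
  int32_t AsI = (int32_t)Value;     // I = atomic_integer_arg_t<T>, assumed int32_t
  unsigned int Back = (unsigned int)AsI;
  assert(Back == Value);
  return 0;
}
```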
@@ -224,70 +233,84 @@ template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicIAdd(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
            intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicIAdd(Ptr, SPIRVScope, SPIRVOrder, Value);
+  return (T)__spirv_AtomicIAdd(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
 
 template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicISub(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
            intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicISub(Ptr, SPIRVScope, SPIRVOrder, Value);
+  return (T)__spirv_AtomicISub(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
 
 template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicAnd(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
           intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicAnd(Ptr, SPIRVScope, SPIRVOrder, Value);
+  return (T)__spirv_AtomicAnd(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
 
 template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicOr(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
          intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicOr(Ptr, SPIRVScope, SPIRVOrder, Value);
+  return (T)__spirv_AtomicOr(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
 
 template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicXor(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
           intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicXor(Ptr, SPIRVScope, SPIRVOrder, Value);
+  return (T)__spirv_AtomicXor(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
 
 template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicMin(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
           intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicMin(Ptr, SPIRVScope, SPIRVOrder, Value);
+  return (T)__spirv_AtomicMin(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
 
 template <typename T, access::address_space AddressSpace>
 inline typename detail::enable_if_t<std::is_integral<T>::value, T>
 AtomicMax(multi_ptr<T, AddressSpace> MPtr, intel::memory_scope Scope,
           intel::memory_order Order, T Value) {
-  auto *Ptr = MPtr.get();
+  using I = detail::atomic_integer_arg_t<T>;
+  auto *Ptr = reinterpret_cast<typename multi_ptr<I, AddressSpace>::pointer_t>(
+      MPtr.get());
   auto SPIRVOrder = getMemorySemanticsMask(Order);
   auto SPIRVScope = getScope(Scope);
-  return __spirv_AtomicMax(Ptr, SPIRVScope, SPIRVOrder, Value);
+  return (T)__spirv_AtomicMax(Ptr, SPIRVScope, SPIRVOrder, (I)Value);
 }
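A combined usage sketch for the read-modify-write wrappers above, again assuming the caller sits where these detail helpers are visible; the function, its name, and the 64-bit `unsigned long` assumption are illustrative, not from the patch:

```cpp
// Illustrative caller, not from the patch. Assumes an LP64 target where
// unsigned long is 8 bytes, and that Total/Max point into global memory.
template <access::address_space AS = access::address_space::global_space>
void RecordSample(multi_ptr<unsigned long, AS> Total,
                  multi_ptr<unsigned long, AS> Max, unsigned long Sample) {
  // Both wrappers cast Sample to atomic_integer_arg_t<unsigned long> on the
  // way in and cast the built-in's result back on the way out; the returned
  // old values are simply discarded here.
  AtomicIAdd(Total, intel::memory_scope::device,
             intel::memory_order::relaxed, Sample);
  AtomicMax(Max, intel::memory_scope::device,
            intel::memory_order::relaxed, Sample);
}
```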
 
 // Native shuffles map directly to a SPIR-V SubgroupShuffle intrinsic