@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -207,4 +209,44 @@ extern void __xadd_wrong_size(void)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")

+#define __add(ptr, inc, lock)						\
+	({								\
+		__typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		default:						\
+			__add_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
+
#endif /* ASM_X86_CMPXCHG_H */
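
For context, and not part of the patch itself: the sketch below shows roughly what add_sync() expands to for a 4-byte operand, i.e. the __X86_CASE_L arm of __add() with the explicit "lock; " prefix. The helper name atomic_add_int and the userspace framing are illustrative assumptions, not kernel API; add_smp() differs only in using LOCK_PREFIX, which the kernel can patch out when only one CPU is online. Builds with GCC/Clang on x86 only.

#include <stdio.h>

/* Illustrative only: mirrors the 32-bit case of __add() with "lock; ". */
static inline void atomic_add_int(int *ptr, int inc)
{
	asm volatile ("lock; addl %1, %0"
		      : "+m" (*ptr)	/* memory operand is read and written */
		      : "ri" (inc)	/* increment in a register or immediate */
		      : "memory", "cc");	/* barrier for the compiler; flags clobbered */
}

int main(void)
{
	int counter = 0;

	atomic_add_int(&counter, 5);	/* atomically: counter += 5 */
	printf("counter = %d\n", counter);	/* prints 5 */
	return 0;
}

The "+m" operand tells the compiler the location is both read and written, and the "memory"/"cc" clobbers stop it from caching the value across the add or assuming the flags survive, which is what makes the macro safe to use on shared data.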