@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size is set to
@@ -31,60 +33,47 @@ extern void __xadd_wrong_size(void)
 #define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
 #endif
 
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)					\
+	({								\
+		__typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _wrong_size();			\
+		}							\
+		__ret;							\
+	})
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case __X86_CASE_B:						\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_W:						\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_L:						\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_Q:						\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -165,46 +154,57 @@ extern void __xadd_wrong_size(void)
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)						\
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
+#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+
+#define __add(ptr, inc, lock)						\
 	({								\
 		__typeof__ (*(ptr)) __ret = (inc);			\
 		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
-			asm volatile (lock "xaddb %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_W:					\
-			asm volatile (lock "xaddw %w0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_L:					\
-			asm volatile (lock "xaddl %0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_Q:					\
-			asm volatile (lock "xaddq %q0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		default:						\
-			__xadd_wrong_size();				\
+			__add_wrong_size();				\
 		}							\
 		__ret;							\
 	})
 
 /*
- * xadd() adds "inc" to "*ptr" and atomically returns the previous
- * value of "*ptr".
+ * add_*() adds "inc" to "*ptr"
  *
- * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
  */
-#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
 
 #endif	/* ASM_X86_CMPXCHG_H */
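
To make the new generic __xchg_op() concrete, the sketch below is roughly what xadd(&x, 1) boils down to for a 4-byte x once the macros above are substituted. It is a hand expansion for illustration, not compiler output, and LOCK_PREFIX is simplified here to a literal "lock; "; in the kernel it is patched away on UP systems via the alternatives mechanism.

/* Approximate hand expansion of xadd(&x, 1) for a 4-byte x (__X86_CASE_L). */
#define LOCK_PREFIX "lock; "	/* simplified stand-in, not the kernel definition */

static inline int xadd_int(int *ptr, int inc)
{
	int __ret = inc;

	/* xaddl leaves the old value of *ptr in __ret and stores old + inc back. */
	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     : "+r" (__ret), "+m" (*ptr)
		     : : "memory", "cc");
	return __ret;		/* previous value of *ptr */
}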
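
For comparison, add_smp(&x, 1) reduces to a plain locked add for a 4-byte x: since the old value is not needed, the increment can be a read-only "ri" input and no register output is required. Again a hand expansion under the same simplified lock prefix assumption.

/* Approximate hand expansion of add_smp(&x, 1) for a 4-byte x (__X86_CASE_L). */
static inline void add_smp_int(int *ptr, int inc)
{
	/* Locked add; only the memory operand changes, no old value comes back. */
	asm volatile("lock; addl %1, %0\n"
		     : "+m" (*ptr)
		     : "ri" (inc)
		     : "memory", "cc");
}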
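
Finally, a small runnable user-space analogue of the semantics the comments describe, using GCC's __atomic builtins purely for illustration (the kernel macros above emit the xchg/xadd/add instructions directly): xchg() swaps and returns the old value, xadd() adds and returns the previous value, and add_smp()/add_sync() add without returning anything.

#include <stdio.h>

int main(void)
{
	unsigned long v = 5;

	/* xchg(&v, 9): store 9, return the old value (5). */
	unsigned long old = __atomic_exchange_n(&v, 9UL, __ATOMIC_SEQ_CST);

	/* xadd(&v, 3): add 3, return the previous value (9). */
	unsigned long prev = __atomic_fetch_add(&v, 3UL, __ATOMIC_SEQ_CST);

	/* add_smp(&v, 2): add 2, the old value is not needed. */
	__atomic_fetch_add(&v, 2UL, __ATOMIC_SEQ_CST);

	printf("old=%lu prev=%lu v=%lu\n", old, prev, v);	/* old=5 prev=9 v=14 */
	return 0;
}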