Commit 01acc26

Author: Ingo Molnar (committed)
Merge branch 'upstream/ticketlock-cleanup' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen into x86/asm
2 parents: 8e8da02 + 31a8394


2 files changed: 71 additions & 84 deletions


arch/x86/include/asm/cmpxchg.h

Lines changed: 70 additions & 70 deletions
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
         __compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
         __compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+        __compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -31,60 +33,47 @@ extern void __xadd_wrong_size(void)
 #define __X86_CASE_Q  -1              /* sizeof will never return -1 */
 #endif
 
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns a the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)                                   \
+        ({                                                              \
+                __typeof__ (*(ptr)) __ret = (arg);                      \
+                switch (sizeof(*(ptr))) {                               \
+                case __X86_CASE_B:                                      \
+                        asm volatile (lock #op "b %b0, %1\n"            \
+                                      : "+r" (__ret), "+m" (*(ptr))     \
+                                      : : "memory", "cc");              \
+                        break;                                          \
+                case __X86_CASE_W:                                      \
+                        asm volatile (lock #op "w %w0, %1\n"            \
+                                      : "+r" (__ret), "+m" (*(ptr))     \
+                                      : : "memory", "cc");              \
+                        break;                                          \
+                case __X86_CASE_L:                                      \
+                        asm volatile (lock #op "l %0, %1\n"             \
+                                      : "+r" (__ret), "+m" (*(ptr))     \
+                                      : : "memory", "cc");              \
+                        break;                                          \
+                case __X86_CASE_Q:                                      \
+                        asm volatile (lock #op "q %q0, %1\n"            \
+                                      : "+r" (__ret), "+m" (*(ptr))     \
+                                      : : "memory", "cc");              \
+                        break;                                          \
+                default:                                                \
+                        __ ## op ## _wrong_size();                      \
+                }                                                       \
+                __ret;                                                  \
+        })
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define __xchg(x, ptr, size)                                            \
-({                                                                      \
-        __typeof(*(ptr)) __x = (x);                                     \
-        switch (size) {                                                 \
-        case __X86_CASE_B:                                              \
-        {                                                               \
-                volatile u8 *__ptr = (volatile u8 *)(ptr);              \
-                asm volatile("xchgb %0,%1"                              \
-                             : "=q" (__x), "+m" (*__ptr)                \
-                             : "0" (__x)                                \
-                             : "memory");                               \
-                break;                                                  \
-        }                                                               \
-        case __X86_CASE_W:                                              \
-        {                                                               \
-                volatile u16 *__ptr = (volatile u16 *)(ptr);            \
-                asm volatile("xchgw %0,%1"                              \
-                             : "=r" (__x), "+m" (*__ptr)                \
-                             : "0" (__x)                                \
-                             : "memory");                               \
-                break;                                                  \
-        }                                                               \
-        case __X86_CASE_L:                                              \
-        {                                                               \
-                volatile u32 *__ptr = (volatile u32 *)(ptr);            \
-                asm volatile("xchgl %0,%1"                              \
-                             : "=r" (__x), "+m" (*__ptr)                \
-                             : "0" (__x)                                \
-                             : "memory");                               \
-                break;                                                  \
-        }                                                               \
-        case __X86_CASE_Q:                                              \
-        {                                                               \
-                volatile u64 *__ptr = (volatile u64 *)(ptr);            \
-                asm volatile("xchgq %0,%1"                              \
-                             : "=r" (__x), "+m" (*__ptr)                \
-                             : "0" (__x)                                \
-                             : "memory");                               \
-                break;                                                  \
-        }                                                               \
-        default:                                                        \
-                __xchg_wrong_size();                                    \
-        }                                                               \
-        __x;                                                            \
-})
-
-#define xchg(ptr, v)                                                    \
-        __xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v)  __xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -165,46 +154,57 @@ extern void __xadd_wrong_size(void)
         __cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)                                          \
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define __xadd(ptr, inc, lock)  __xchg_op((ptr), (inc), xadd, lock)
+#define xadd(ptr, inc)          __xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc)     __xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc)    __xadd((ptr), (inc), "")
+
+#define __add(ptr, inc, lock)                                           \
         ({                                                              \
                 __typeof__ (*(ptr)) __ret = (inc);                      \
                 switch (sizeof(*(ptr))) {                               \
                 case __X86_CASE_B:                                      \
-                        asm volatile (lock "xaddb %b0, %1\n"            \
-                                      : "+r" (__ret), "+m" (*(ptr))     \
-                                      : : "memory", "cc");              \
+                        asm volatile (lock "addb %b1, %0\n"             \
+                                      : "+m" (*(ptr)) : "ri" (inc)      \
+                                      : "memory", "cc");                \
                         break;                                          \
                 case __X86_CASE_W:                                      \
-                        asm volatile (lock "xaddw %w0, %1\n"            \
-                                      : "+r" (__ret), "+m" (*(ptr))     \
-                                      : : "memory", "cc");              \
+                        asm volatile (lock "addw %w1, %0\n"             \
+                                      : "+m" (*(ptr)) : "ri" (inc)      \
+                                      : "memory", "cc");                \
                         break;                                          \
                 case __X86_CASE_L:                                      \
-                        asm volatile (lock "xaddl %0, %1\n"             \
-                                      : "+r" (__ret), "+m" (*(ptr))     \
-                                      : : "memory", "cc");              \
+                        asm volatile (lock "addl %1, %0\n"              \
+                                      : "+m" (*(ptr)) : "ri" (inc)      \
+                                      : "memory", "cc");                \
                         break;                                          \
                 case __X86_CASE_Q:                                      \
-                        asm volatile (lock "xaddq %q0, %1\n"            \
-                                      : "+r" (__ret), "+m" (*(ptr))     \
-                                      : : "memory", "cc");              \
+                        asm volatile (lock "addq %1, %0\n"              \
+                                      : "+m" (*(ptr)) : "ri" (inc)      \
+                                      : "memory", "cc");                \
                         break;                                          \
                 default:                                                \
-                        __xadd_wrong_size();                            \
+                        __add_wrong_size();                             \
                 }                                                       \
                 __ret;                                                  \
         })
 
 /*
- * xadd() adds "inc" to "*ptr" and atomically returns the previous
- * value of "*ptr".
+ * add_*() adds "inc" to "*ptr"
  *
- * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
  */
-#define xadd(ptr, inc)          __xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)     __xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)    __xadd((ptr), (inc), "")
+#define add_smp(ptr, inc)       __add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)      __add((ptr), (inc), "lock; ")
 
 #endif /* ASM_X86_CMPXCHG_H */
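
Note: for reference, here is a minimal user-space sketch of the consolidated pattern above, reduced to the 32-bit case. It assumes GCC-style inline asm on x86; the names my_op32, my_xchg and my_xadd are invented for this example and are not kernel APIs. The point is only to show how one macro body, parameterized on the instruction mnemonic and the lock prefix, yields both xchg (which needs no prefix) and a locked xadd that returns the previous value.

/*
 * Illustrative sketch only, not kernel code: a reduced, 32-bit-only
 * version of the __xchg_op() pattern in the diff above.
 */
#include <stdio.h>

#define my_op32(ptr, arg, op, lock)                              \
({                                                               \
        __typeof__(*(ptr)) __ret = (arg);                        \
        /* "op" is pasted into the mnemonic, e.g. xchgl/xaddl */ \
        asm volatile (lock #op "l %0, %1\n"                      \
                      : "+r" (__ret), "+m" (*(ptr))              \
                      : : "memory", "cc");                       \
        __ret;                                                   \
})

#define my_xchg(ptr, v)  my_op32((ptr), (v), xchg, "")        /* xchg implies lock */
#define my_xadd(ptr, i)  my_op32((ptr), (i), xadd, "lock; ")  /* explicit lock prefix */

int main(void)
{
        int counter = 10;

        int old  = my_xadd(&counter, 5);   /* old == 10, counter == 15 */
        int prev = my_xchg(&counter, 42);  /* prev == 15, counter == 42 */

        printf("old=%d prev=%d counter=%d\n", old, prev, counter);
        return 0;
}

Built with gcc on x86 or x86-64 this should print old=10 prev=15 counter=42, matching the fetch-and-add and exchange semantics the kernel macros provide.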

arch/x86/include/asm/spinlock.h

Lines changed: 1 addition & 14 deletions
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
         return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-                     : "+m" (lock->head_tail)
-                     :
-                     : "memory", "cc");
+        __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-                     : "+m" (lock->head_tail)
-                     :
-                     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
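
Note: for context, a simplified user-space model of the ticket lock touched above, using GCC/Clang atomic builtins. struct ticketlock, ticket_lock() and ticket_unlock() are invented names, and this is only a sketch of the idea, not the kernel implementation (which packs the ticket width differently depending on NR_CPUS). It illustrates why the unlock path can become a single __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX): releasing the lock is just an atomic increment of the head field, with no need for the old value and no separate incb/incw variants.

/* Illustrative model only, not the kernel code. */
#include <stdint.h>

struct ticketlock {
        union {
                uint32_t head_tail;
                struct {
                        uint16_t head;   /* ticket currently being served */
                        uint16_t tail;   /* next ticket to hand out       */
                } tickets;
        };
};

void ticket_lock(struct ticketlock *lock)
{
        /* take a ticket: fetch-and-add on tail, the xadd() side of the change */
        uint16_t me = __sync_fetch_and_add(&lock->tickets.tail, 1);

        /* spin until our ticket is being served */
        while (__atomic_load_n(&lock->tickets.head, __ATOMIC_ACQUIRE) != me)
                ;                        /* cpu_relax() in the kernel */
}

void ticket_unlock(struct ticketlock *lock)
{
        /* pass the lock on: one atomic add of 1 to head, old value not needed */
        __atomic_fetch_add(&lock->tickets.head, 1, __ATOMIC_RELEASE);
}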
