
Commit 3d94ae0

x86/cmpxchg: add a locked add() helper
Mostly to remove some conditional code in spinlock.h.

Signed-off-by: Jeremy Fitzhardinge <[email protected]>
1 parent: 4a7f340

2 files changed: +43 −14 lines

arch/x86/include/asm/cmpxchg.h

Lines changed: 42 additions & 0 deletions
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -207,4 +209,44 @@ extern void __xadd_wrong_size(void)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
 
+#define __add(ptr, inc, lock)						\
+	({								\
+		__typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		default:						\
+			__add_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
+
 #endif /* ASM_X86_CMPXCHG_H */
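
The heavy lifting here is the sizeof(*(ptr)) switch: only the case matching the pointed-to type survives optimization, so one macro covers 1-, 2-, 4- and 8-byte adds, and any other size trips __add_wrong_size(). Below is a minimal user-space sketch of the same dispatch pattern; it assumes GCC or Clang on x86-64, and demo_add() with its hard-coded "lock; " prefix is an illustrative stand-in, not the kernel's helper.

/*
 * Minimal user-space sketch of the sizeof() dispatch used by __add().
 * Assumes GCC or Clang on x86-64; demo_add() and the hard-coded "lock; "
 * prefix are illustrative stand-ins, not the kernel's code.
 */
#include <stdio.h>

#define demo_add(ptr, inc, lock)                                \
({                                                              \
        __typeof__(*(ptr)) __ret = (inc);                       \
        switch (sizeof(*(ptr))) {                               \
        case 2:                                                 \
                asm volatile (lock "addw %w1, %0"               \
                              : "+m" (*(ptr)) : "ri" (inc)      \
                              : "memory", "cc");                \
                break;                                          \
        case 4:                                                 \
                asm volatile (lock "addl %1, %0"                \
                              : "+m" (*(ptr)) : "ri" (inc)      \
                              : "memory", "cc");                \
                break;                                          \
        }                                                       \
        __ret;                                                  \
})

int main(void)
{
        unsigned short head = 1;        /* like a ticket head when NR_CPUS >= 256 */
        unsigned int counter = 41;

        demo_add(&head, 1, "lock; ");    /* resolves to "lock addw" */
        demo_add(&counter, 1, "lock; "); /* resolves to "lock addl" */

        printf("head=%u counter=%u\n", (unsigned)head, counter);
        return 0;
}

Compiling with -O2 and inspecting the assembly should show a single lock addw for the 16-bit case and a single lock addl for the 32-bit one, with the other switch arms eliminated as dead code.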

arch/x86/include/asm/spinlock.h

Lines changed: 1 addition & 14 deletions
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
+	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
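
This is why the NR_CPUS conditional can go: the ticket head is already a byte-sized type when NR_CPUS < 256 and a word otherwise, so __add()'s size dispatch picks addb or addw on its own. A rough user-space mock of the layout involved follows; the field names come from the diff, while the typedefs paraphrase arch/x86/include/asm/spinlock_types.h from memory and are an assumption rather than a verbatim copy.

/*
 * User-space mock of the lock layout __ticket_spin_unlock() now relies on.
 * The field names (head_tail, tickets.head, tickets.tail) come from the
 * diff; the typedefs are a paraphrase of arch/x86/include/asm/
 * spinlock_types.h of this era, not a verbatim copy.
 */
#include <stdint.h>
#include <stdio.h>

#define CONFIG_NR_CPUS 64               /* pick a value for the demo */

#if (CONFIG_NR_CPUS < 256)
typedef uint8_t  __ticket_t;            /* __add() would emit "addb" */
typedef uint16_t __ticketpair_t;
#else
typedef uint16_t __ticket_t;            /* __add() would emit "addw" */
typedef uint32_t __ticketpair_t;
#endif

typedef struct arch_spinlock {
        union {
                __ticketpair_t head_tail;       /* both tickets, for cmpxchg() */
                struct __raw_tickets {
                        __ticket_t head, tail;  /* next-to-serve, next-to-hand-out */
                } tickets;
        };
} arch_spinlock_t;

int main(void)
{
        arch_spinlock_t lock = { .tickets = { .head = 1, .tail = 1 } };

        /*
         * Unlocking is now a single increment of the head ticket; in the
         * kernel that is __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX),
         * and sizeof(lock->tickets.head) selects the instruction width, so
         * the two NR_CPUS-dependent copies of the function are unnecessary.
         */
        lock.tickets.head++;

        printf("head=%u tail=%u\n",
               (unsigned)lock.tickets.head, (unsigned)lock.tickets.tail);
        return 0;
}

The real unlock of course uses the locked __add() helper added above rather than a plain increment.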
