
Commit 625e88b

Authored by Will Deacon (wildea01), committed by Ingo Molnar
locking/qspinlock: Merge 'struct __qspinlock' into 'struct qspinlock'
'struct __qspinlock' provides a handy union of fields so that subcomponents of the lockword can be accessed by name, without having to manage shifts and masks explicitly and take endianness into account.

This is useful in qspinlock.h and also potentially in arch headers, so move the 'struct __qspinlock' into 'struct qspinlock' and kill the extra definition.

Signed-off-by: Will Deacon <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Waiman Long <[email protected]>
Acked-by: Boqun Feng <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
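For illustration only, a minimal user-space sketch (not kernel code) of the idea: a union overlays named sub-fields on the 32-bit lockword so the locked byte, the pending byte and the tail halfword can be read and written by name instead of via shifts and masks. Only the little-endian variant is shown; the field names mirror the kernel's, but the fixed-width types and the main() harness are assumptions for the demo.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical user-space mirror of the merged lockword layout
 * (little-endian variant only). */
struct demo_qspinlock {
        union {
                uint32_t val;                    /* the whole 32-bit lockword */
                struct {
                        uint8_t locked;          /* bits  0-7  */
                        uint8_t pending;         /* bits  8-15 */
                };
                struct {
                        uint16_t locked_pending; /* bits  0-15 */
                        uint16_t tail;           /* bits 16-31 */
                };
        };
};

int main(void)
{
        struct demo_qspinlock lock = { .val = 0 };

        /* Named sub-fields replace explicit shifts and masks. */
        lock.locked = 1;        /* cf. WRITE_ONCE(lock->locked, _Q_LOCKED_VAL) */
        lock.tail = 0x0042;     /* cf. xchg_release(&lock->tail, ...)          */

        assert(lock.val == (((uint32_t)0x0042 << 16) | 1));
        printf("val = 0x%08x\n", (unsigned)lock.val);
        return 0;
}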
Parent: fcfdfe3

5 files changed (+46, -71 lines)

arch/x86/include/asm/qspinlock.h

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@
  */
 static inline void native_queued_spin_unlock(struct qspinlock *lock)
 {
-        smp_store_release((u8 *)lock, 0);
+        smp_store_release(&lock->locked, 0);
 }
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS

arch/x86/include/asm/qspinlock_paravirt.h

Lines changed: 1 addition & 2 deletions
@@ -22,8 +22,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
  *
  * void __pv_queued_spin_unlock(struct qspinlock *lock)
  * {
- *      struct __qspinlock *l = (void *)lock;
- *      u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+ *      u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
  *
  *      if (likely(lockval == _Q_LOCKED_VAL))
  *              return;

include/asm-generic/qspinlock_types.h

Lines changed: 30 additions & 2 deletions
@@ -29,13 +29,41 @@
 #endif
 
 typedef struct qspinlock {
-        atomic_t        val;
+        union {
+                atomic_t val;
+
+                /*
+                 * By using the whole 2nd least significant byte for the
+                 * pending bit, we can allow better optimization of the lock
+                 * acquisition for the pending bit holder.
+                 */
+#ifdef __LITTLE_ENDIAN
+                struct {
+                        u8      locked;
+                        u8      pending;
+                };
+                struct {
+                        u16     locked_pending;
+                        u16     tail;
+                };
+#else
+                struct {
+                        u16     tail;
+                        u16     locked_pending;
+                };
+                struct {
+                        u8      reserved[2];
+                        u8      pending;
+                        u8      locked;
+                };
+#endif
+        };
 } arch_spinlock_t;
 
 /*
  * Initializier
  */
-#define __ARCH_SPIN_LOCK_UNLOCKED       { ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED       { .val = ATOMIC_INIT(0) }
 
 /*
  * Bitfields in the atomic value:
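A note on the comment in the hunk above: because the pending bit occupies the whole second-least-significant byte next to the locked byte, the pending-bit holder can take the lock with a single 16-bit store to locked_pending that clears pending and sets locked at once, which is what clear_pending_set_locked() does in the qspinlock.c diff below. A hedged user-space sketch of that handover, assuming the little-endian layout and using plain stores in place of WRITE_ONCE():

#include <assert.h>
#include <stdint.h>

/* Assumed little-endian layout, mirroring the union above. */
union lockword {
        uint32_t val;
        struct { uint8_t locked; uint8_t pending; uint16_t tail; };
        struct { uint16_t locked_pending; uint16_t tail_hw; };
};

#define DEMO_LOCKED_VAL 1U      /* stands in for _Q_LOCKED_VAL */

/* One halfword store clears pending and sets locked together; the kernel
 * version is WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL). */
static void clear_pending_set_locked(union lockword *l)
{
        l->locked_pending = DEMO_LOCKED_VAL;
}

int main(void)
{
        union lockword l = { .val = 0 };

        l.pending = 1;                  /* we are the pending waiter  */
        clear_pending_set_locked(&l);   /* hand the lock to ourselves */
        assert(l.locked == 1 && l.pending == 0 && l.tail == 0);
        return 0;
}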

kernel/locking/qspinlock.c

Lines changed: 3 additions & 43 deletions
@@ -114,40 +114,6 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
 
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
 
-/*
- * By using the whole 2nd least significant byte for the pending bit, we
- * can allow better optimization of the lock acquisition for the pending
- * bit holder.
- *
- * This internal structure is also used by the set_locked function which
- * is not restricted to _Q_PENDING_BITS == 8.
- */
-struct __qspinlock {
-        union {
-                atomic_t val;
-#ifdef __LITTLE_ENDIAN
-                struct {
-                        u8      locked;
-                        u8      pending;
-                };
-                struct {
-                        u16     locked_pending;
-                        u16     tail;
-                };
-#else
-                struct {
-                        u16     tail;
-                        u16     locked_pending;
-                };
-                struct {
-                        u8      reserved[2];
-                        u8      pending;
-                        u8      locked;
-                };
-#endif
-        };
-};
-
 #if _Q_PENDING_BITS == 8
 /**
  * clear_pending_set_locked - take ownership and clear the pending bit.
@@ -159,9 +125,7 @@ struct __qspinlock {
  */
 static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
-        WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
+        WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
 }
 
 /*
@@ -176,13 +140,11 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
  */
 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 {
-        struct __qspinlock *l = (void *)lock;
-
         /*
          * Use release semantics to make sure that the MCS node is properly
          * initialized before changing the tail code.
          */
-        return (u32)xchg_release(&l->tail,
+        return (u32)xchg_release(&lock->tail,
                                  tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
 }
 
@@ -237,9 +199,7 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
  */
 static __always_inline void set_locked(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
-        WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+        WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
 }
 
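Relatedly, with the tail in its own 16-bit field, the xchg_tail() shown above can be a single halfword exchange rather than a compare-and-swap loop over the full word. A rough user-space sketch using the GCC/Clang __atomic builtins; the little-endian, _Q_PENDING_BITS == 8 layout (tail in bits 16-31) is taken from the headers above, while the demo_ names and the main() harness are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

#define TAIL_OFFSET 16  /* tail occupies bits 16-31, as in the union above */

/* Assumed little-endian, _Q_PENDING_BITS == 8 layout. */
union lockword {
        uint32_t val;
        struct { uint16_t locked_pending; uint16_t tail; };
};

/* Sketch of xchg_tail(): exchange only the tail halfword with release
 * ordering, roughly what xchg_release(&lock->tail, ...) does. */
static uint32_t demo_xchg_tail(union lockword *lock, uint32_t tail)
{
        uint16_t old = __atomic_exchange_n(&lock->tail,
                                           (uint16_t)(tail >> TAIL_OFFSET),
                                           __ATOMIC_RELEASE);
        return (uint32_t)old << TAIL_OFFSET;
}

int main(void)
{
        union lockword l = { .val = 0 };
        uint32_t prev = demo_xchg_tail(&l, 2U << TAIL_OFFSET);

        printf("previous tail %u, lockword now 0x%08x\n",
               prev >> TAIL_OFFSET, (unsigned)l.val);
        return 0;
}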

kernel/locking/qspinlock_paravirt.h

Lines changed: 11 additions & 23 deletions
@@ -87,8 +87,6 @@ struct pv_node {
 #define queued_spin_trylock(l)  pv_hybrid_queued_unfair_trylock(l)
 static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
         /*
          * Stay in unfair lock mode as long as queued mode waiters are
          * present in the MCS wait queue but the pending bit isn't set.
@@ -97,7 +95,7 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
         int val = atomic_read(&lock->val);
 
         if (!(val & _Q_LOCKED_PENDING_MASK) &&
-            (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+            (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
                 qstat_inc(qstat_pv_lock_stealing, true);
                 return true;
         }
@@ -117,16 +115,12 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 #if _Q_PENDING_BITS == 8
 static __always_inline void set_pending(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
-        WRITE_ONCE(l->pending, 1);
+        WRITE_ONCE(lock->pending, 1);
 }
 
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
-        WRITE_ONCE(l->pending, 0);
+        WRITE_ONCE(lock->pending, 0);
 }
 
 /*
@@ -136,10 +130,8 @@ static __always_inline void clear_pending(struct qspinlock *lock)
  */
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
-        return !READ_ONCE(l->locked) &&
-               (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL,
+        return !READ_ONCE(lock->locked) &&
+               (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
                         _Q_LOCKED_VAL) == _Q_PENDING_VAL);
 }
 #else /* _Q_PENDING_BITS == 8 */
@@ -384,7 +376,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
         struct pv_node *pn = (struct pv_node *)node;
-        struct __qspinlock *l = (void *)lock;
 
         /*
          * If the vCPU is indeed halted, advance its state to match that of
@@ -413,7 +404,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
          * the hash table later on at unlock time, no atomic instruction is
          * needed.
          */
-        WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+        WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
         (void)pv_hash(lock, pn);
 }
 
@@ -428,7 +419,6 @@ static u32
 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 {
         struct pv_node *pn = (struct pv_node *)node;
-        struct __qspinlock *l = (void *)lock;
         struct qspinlock **lp = NULL;
         int waitcnt = 0;
         int loop;
@@ -479,21 +469,21 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
                  *
                  * Matches the smp_rmb() in __pv_queued_spin_unlock().
                  */
-                if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
+                if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
                         /*
                          * The lock was free and now we own the lock.
                          * Change the lock value back to _Q_LOCKED_VAL
                          * and unhash the table.
                          */
-                        WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+                        WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
                         WRITE_ONCE(*lp, NULL);
                         goto gotlock;
                 }
         }
         WRITE_ONCE(pn->state, vcpu_hashed);
         qstat_inc(qstat_pv_wait_head, true);
         qstat_inc(qstat_pv_wait_again, waitcnt);
-        pv_wait(&l->locked, _Q_SLOW_VAL);
+        pv_wait(&lock->locked, _Q_SLOW_VAL);
 
         /*
          * Because of lock stealing, the queue head vCPU may not be
@@ -518,7 +508,6 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 __visible void
 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 {
-        struct __qspinlock *l = (void *)lock;
         struct pv_node *node;
 
         if (unlikely(locked != _Q_SLOW_VAL)) {
@@ -547,7 +536,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
          * Now that we have a reference to the (likely) blocked pv_node,
          * release the lock.
          */
-        smp_store_release(&l->locked, 0);
+        smp_store_release(&lock->locked, 0);
 
         /*
          * At this point the memory pointed at by lock can be freed/reused,
@@ -573,15 +562,14 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 #ifndef __pv_queued_spin_unlock
 __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
         u8 locked;
 
         /*
          * We must not unlock if SLOW, because in that case we must first
          * unhash. Otherwise it would be possible to have multiple @lock
          * entries, which would be BAD.
          */
-        locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
+        locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
         if (likely(locked == _Q_LOCKED_VAL))
                 return;
 