@@ -87,8 +87,6 @@ struct pv_node {
 #define queued_spin_trylock(l)  pv_hybrid_queued_unfair_trylock(l)
 static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
         /*
          * Stay in unfair lock mode as long as queued mode waiters are
          * present in the MCS wait queue but the pending bit isn't set.
@@ -97,7 +95,7 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
         int val = atomic_read(&lock->val);
 
         if (!(val & _Q_LOCKED_PENDING_MASK) &&
-            (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+            (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
                 qstat_inc(qstat_pv_lock_stealing, true);
                 return true;
         }
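
Note: the direct lock->val and lock->locked accesses above only work because struct qspinlock itself now exposes the byte/halfword views that the removed struct __qspinlock overlay used to provide. A minimal sketch of the little-endian layout these accesses assume (the struct name *_sketch and comments are illustrative additions; the authoritative definition lives in include/asm-generic/qspinlock_types.h and also has a big-endian variant with the byte order reversed):

/* Sketch only: approximate layout of the merged lock word. */
struct qspinlock_sketch {
        union {
                atomic_t val;                   /* whole 32-bit lock word          */
                struct {
                        u8 locked;              /* bits 0-7: locked byte           */
                        u8 pending;             /* bit 8: pending, in its own byte */
                };
                struct {
                        u16 locked_pending;     /* locked + pending as one halfword */
                        u16 tail;               /* MCS tail (queue) encoding        */
                };
        };
};
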
@@ -117,16 +115,12 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 #if _Q_PENDING_BITS == 8
 static __always_inline void set_pending(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
-        WRITE_ONCE(l->pending, 1);
+        WRITE_ONCE(lock->pending, 1);
 }
 
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
-        WRITE_ONCE(l->pending, 0);
+        WRITE_ONCE(lock->pending, 0);
 }
 
 /*
@@ -136,10 +130,8 @@ static __always_inline void clear_pending(struct qspinlock *lock)
  */
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
-
-        return !READ_ONCE(l->locked) &&
-               (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL,
+        return !READ_ONCE(lock->locked) &&
+               (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
                                 _Q_LOCKED_VAL) == _Q_PENDING_VAL);
 }
 #else /* _Q_PENDING_BITS == 8 */
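
Note: the plain-store helpers above are only possible because _Q_PENDING_BITS == 8 gives the pending bit a byte of its own. For orientation, a hedged sketch of what the #else branch has to do instead (illustration, not part of this diff; the *_sketch names are mine): when pending shares the word with other fields, it must be set and cleared with atomic read-modify-write operations on the whole lock word.

/* Illustrative only: without a private pending byte, use atomic RMW on val. */
static __always_inline void set_pending_sketch(struct qspinlock *lock)
{
        atomic_or(_Q_PENDING_VAL, &lock->val);          /* set the pending bit   */
}

static __always_inline void clear_pending_sketch(struct qspinlock *lock)
{
        atomic_andnot(_Q_PENDING_VAL, &lock->val);      /* clear the pending bit */
}
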
@@ -384,7 +376,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
         struct pv_node *pn = (struct pv_node *)node;
-        struct __qspinlock *l = (void *)lock;
 
         /*
          * If the vCPU is indeed halted, advance its state to match that of
@@ -413,7 +404,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
          * the hash table later on at unlock time, no atomic instruction is
          * needed.
          */
-        WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+        WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
         (void)pv_hash(lock, pn);
 }
 
@@ -428,7 +419,6 @@ static u32
 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 {
         struct pv_node *pn = (struct pv_node *)node;
-        struct __qspinlock *l = (void *)lock;
         struct qspinlock **lp = NULL;
         int waitcnt = 0;
         int loop;
@@ -479,21 +469,21 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
                          *
                          * Matches the smp_rmb() in __pv_queued_spin_unlock().
                          */
-                        if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
+                        if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
                                 /*
                                  * The lock was free and now we own the lock.
                                  * Change the lock value back to _Q_LOCKED_VAL
                                  * and unhash the table.
                                  */
-                                WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+                                WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
                                 WRITE_ONCE(*lp, NULL);
                                 goto gotlock;
                         }
                 }
                 WRITE_ONCE(pn->state, vcpu_hashed);
                 qstat_inc(qstat_pv_wait_head, true);
                 qstat_inc(qstat_pv_wait_again, waitcnt);
-                pv_wait(&l->locked, _Q_SLOW_VAL);
+                pv_wait(&lock->locked, _Q_SLOW_VAL);
 
                 /*
                  * Because of lock stealing, the queue head vCPU may not be
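
Note: the "Matches the smp_rmb()" comment refers to the unlock side pairing with the xchg() above: the hash entry published by pv_hash() must be visible to any CPU that observes _Q_SLOW_VAL in the locked byte. A rough reader-side sketch of that pairing, under the assumption that the unlocker is handed the old locked-byte value (the function name *_sketch is mine; pv_unhash(), pv_kick(), and the smp_store_release() are the existing pieces touched in the hunks below):

/* Sketch of the pairing only; the real code is __pv_queued_spin_unlock_slowpath(). */
static void unlock_pairing_sketch(struct qspinlock *lock, u8 locked)
{
        struct pv_node *node;

        if (locked != _Q_SLOW_VAL)
                return;                                 /* nothing was hashed          */

        smp_rmb();                                      /* pairs with the xchg() above */
        node = pv_unhash(lock);                         /* hash entry now visible      */
        smp_store_release(&lock->locked, 0);            /* release the lock            */
        pv_kick(node->cpu);                             /* wake the halted queue head  */
}
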
@@ -518,7 +508,6 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 __visible void
 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 {
-        struct __qspinlock *l = (void *)lock;
         struct pv_node *node;
 
         if (unlikely(locked != _Q_SLOW_VAL)) {
@@ -547,7 +536,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
          * Now that we have a reference to the (likely) blocked pv_node,
          * release the lock.
          */
-        smp_store_release(&l->locked, 0);
+        smp_store_release(&lock->locked, 0);
 
         /*
          * At this point the memory pointed at by lock can be freed/reused,
@@ -573,15 +562,14 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 #ifndef __pv_queued_spin_unlock
 __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-        struct __qspinlock *l = (void *)lock;
         u8 locked;
 
         /*
          * We must not unlock if SLOW, because in that case we must first
          * unhash. Otherwise it would be possible to have multiple @lock
          * entries, which would be BAD.
          */
-        locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
+        locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
         if (likely(locked == _Q_LOCKED_VAL))
                 return;
 
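
Note: the cmpxchg_release() fast path above only succeeds while the locked byte still holds _Q_LOCKED_VAL; any other value (normally _Q_SLOW_VAL) means a halted waiter has been hashed, so the unhash-and-kick work must happen before the lock byte is cleared. A hedged sketch of how the fast and slow unlock paths fit together (the wrapper name *_sketch is mine; __pv_queued_spin_unlock_slowpath() takes the lock and the old locked-byte value, as declared in the hunk above):

/* Sketch only: fast/slow split of the PV unlock path. */
static void pv_unlock_split_sketch(struct qspinlock *lock)
{
        u8 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);

        if (likely(locked == _Q_LOCKED_VAL))
                return;                                 /* uncontended: done            */

        /* Slow value seen: unhash, release and kick via the slow path. */
        __pv_queued_spin_unlock_slowpath(lock, locked);
}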