@@ -26,6 +26,7 @@ static bool maybe_stealers __read_mostly = true;
 static int head_spins __read_mostly = (1 << 8);
 
 static bool pv_yield_owner __read_mostly = true;
+static bool pv_yield_allow_steal __read_mostly = false;
 static bool pv_yield_prev __read_mostly = true;
 
 static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
@@ -135,6 +136,22 @@ static __always_inline u32 set_mustq(struct qspinlock *lock)
 	return prev;
 }
 
+static __always_inline u32 clear_mustq(struct qspinlock *lock)
+{
+	u32 prev;
+
+	asm volatile(
+"1:	lwarx	%0,0,%1		# clear_mustq		\n"
+"	andc	%0,%0,%2				\n"
+"	stwcx.	%0,0,%1					\n"
+"	bne-	1b					\n"
+	: "=&r" (prev)
+	: "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
+	: "cr0", "memory");
+
+	return prev;
+}
+
 static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 {
 	int cpu = decode_tail_cpu(val);
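For readers less familiar with ppc64 larx/stcx. sequences, here is a rough portable model of the clear_mustq() above, sketched with GCC/Clang atomic builtins; it is an illustration only, not part of the patch. One subtlety: the asm reuses %0 for the masked value, so despite the name, prev holds the updated lock word on return (no caller in this patch consumes the return value).

/* Illustration only, not part of the patch: a compare-exchange retry
 * loop standing in for the lwarx/stwcx. reservation loop above. The
 * asm's "memory" clobber also acts as a compiler barrier, which these
 * relaxed builtins do not reproduce.
 */
static inline u32 clear_mustq_model(struct qspinlock *lock)
{
	u32 old = __atomic_load_n(&lock->val, __ATOMIC_RELAXED);

	/* On failure, 'old' is refreshed with the current lock word. */
	while (!__atomic_compare_exchange_n(&lock->val, &old,
					    old & ~_Q_MUST_Q_VAL, false,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;

	return old & ~_Q_MUST_Q_VAL;	/* like the asm, the new word */
}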
@@ -159,7 +176,7 @@ static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 	BUG();
 }
 
-static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+static __always_inline void __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
 {
 	int owner;
 	u32 yield_count;
@@ -188,14 +205,33 @@ static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 va
 	smp_rmb();
 
 	if (READ_ONCE(lock->val) == val) {
+		if (mustq)
+			clear_mustq(lock);
 		yield_to_preempted(owner, yield_count);
+		if (mustq)
+			set_mustq(lock);
 		/* Don't relax if we yielded. Maybe we should? */
 		return;
 	}
 relax:
 	cpu_relax();
 }
 
+static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+{
+	__yield_to_locked_owner(lock, val, paravirt, false);
+}
+
+static __always_inline void yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+{
+	bool mustq = false;
+
+	if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal)
+		mustq = true;
+
+	__yield_to_locked_owner(lock, val, paravirt, mustq);
+}
+
 static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
 {
 	int prev_cpu = decode_tail_cpu(val);
@@ -211,7 +247,7 @@ static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *
 	if ((yield_count & 1) == 0)
 		goto relax; /* owner vcpu is running */
 
-	smp_rmb(); /* See yield_to_locked_owner comment */
+	smp_rmb(); /* See __yield_to_locked_owner comment */
 
 	if (!node->locked) {
 		yield_to_preempted(prev_cpu, yield_count);
@@ -308,7 +344,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 		if (!(val & _Q_LOCKED_VAL))
 			break;
 
-		yield_to_locked_owner(lock, val, paravirt);
+		yield_head_to_locked_owner(lock, val, paravirt);
 		if (!maybe_stealers)
 			continue;
 		iters++;
@@ -444,6 +480,22 @@ static int pv_yield_owner_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n");
 
+static int pv_yield_allow_steal_set(void *data, u64 val)
+{
+	pv_yield_allow_steal = !!val;
+
+	return 0;
+}
+
+static int pv_yield_allow_steal_get(void *data, u64 *val)
+{
+	*val = pv_yield_allow_steal;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n");
+
 static int pv_yield_prev_set(void *data, u64 val)
 {
 	pv_yield_prev = !!val;
@@ -466,6 +518,7 @@ static __init int spinlock_debugfs_init(void)
 	debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
 	if (is_shared_processor()) {
 		debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
+		debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal);
 		debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
 	}
 
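Usage note: on a shared-processor (pseries) guest with this patch applied, the new knob appears alongside the existing qspinlock files in the powerpc debugfs directory. A minimal userspace sketch for enabling it at runtime follows; it assumes debugfs is mounted at /sys/kernel/debug, and is equivalent to echo 1 > /sys/kernel/debug/powerpc/qspl_pv_yield_allow_steal.

/* Minimal sketch, not part of the patch: enable lock stealing while
 * the queue head yields to a preempted owner, via the debugfs knob
 * this patch adds. Path assumes debugfs at /sys/kernel/debug.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/powerpc/qspl_pv_yield_allow_steal", "w");

	if (!f) {
		perror("qspl_pv_yield_allow_steal");
		return 1;
	}
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}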