
Commit b4c3cdc

npiggin authored and mpe committed
powerpc/qspinlock: allow stealing when head of queue yields
If the head of queue is preventing stealing but it finds the owner vCPU
is preempted, it will yield its cycles to the owner which could cause it
to become preempted. Add an option to re-allow stealers before yielding,
and disallow them again after returning from the yield.

Disable this option by default for now, i.e., no logical change.

Signed-off-by: Nicholas Piggin <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent bd48287 commit b4c3cdc
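
For a quick sense of the mechanism before reading the diff: the head of the queue normally holds a "must queue" bit in the lock word that forbids other CPUs from stealing; with the new option it drops that bit for the duration of the yield and restores it on return. A minimal user-space sketch of that pattern, not the kernel code: MUST_Q, lock_val and yield_to_owner() are illustrative stand-ins for _Q_MUST_Q_VAL, the qspinlock word and yield_to_preempted(), and the check that the owner vCPU is still preempted is omitted.

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MUST_Q (1u << 16)              /* stand-in for _Q_MUST_Q_VAL */

static _Atomic uint32_t lock_val;      /* stand-in for lock->val */
static bool yield_allow_steal = false; /* stand-in for pv_yield_allow_steal */

/* Stand-in for yield_to_preempted(): donate our cycles to the lock owner. */
static void yield_to_owner(void)
{
	sched_yield();
}

/*
 * Head-of-queue yield: if we had set MUST_Q (blocking stealers) and the
 * tunable allows it, clear MUST_Q for the duration of the yield so other
 * CPUs may take the lock while the owner is preempted, then set it again
 * on return so the queue head regains its priority.
 */
static void yield_head_to_locked_owner(uint32_t val)
{
	bool mustq = (val & MUST_Q) && yield_allow_steal;

	if (mustq)
		atomic_fetch_and(&lock_val, ~MUST_Q);  /* like clear_mustq() */

	yield_to_owner();

	if (mustq)
		atomic_fetch_or(&lock_val, MUST_Q);    /* like set_mustq() */
}

int main(void)
{
	yield_allow_steal = true;
	atomic_store(&lock_val, MUST_Q);  /* pretend we are the queue head */
	yield_head_to_locked_owner(atomic_load(&lock_val));
	return 0;
}

The real change below does the same clear/set around yield_to_preempted(), but only when the lock word is unchanged and the owner is still preempted.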

1 file changed (+56, -3 lines)

arch/powerpc/lib/qspinlock.c

Lines changed: 56 additions & 3 deletions
@@ -26,6 +26,7 @@ static bool maybe_stealers __read_mostly = true;
 static int head_spins __read_mostly = (1 << 8);
 
 static bool pv_yield_owner __read_mostly = true;
+static bool pv_yield_allow_steal __read_mostly = false;
 static bool pv_yield_prev __read_mostly = true;
 
 static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
@@ -135,6 +136,22 @@ static __always_inline u32 set_mustq(struct qspinlock *lock)
 	return prev;
 }
 
+static __always_inline u32 clear_mustq(struct qspinlock *lock)
+{
+	u32 prev;
+
+	asm volatile(
+"1:	lwarx	%0,0,%1		# clear_mustq			\n"
+"	andc	%0,%0,%2					\n"
+"	stwcx.	%0,0,%1						\n"
+"	bne-	1b						\n"
+	: "=&r" (prev)
+	: "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
+	: "cr0", "memory");
+
+	return prev;
+}
+
 static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 {
 	int cpu = decode_tail_cpu(val);
@@ -159,7 +176,7 @@ static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 	BUG();
 }
 
-static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+static __always_inline void __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
 {
 	int owner;
 	u32 yield_count;
@@ -188,14 +205,33 @@ static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 va
 	smp_rmb();
 
 	if (READ_ONCE(lock->val) == val) {
+		if (mustq)
+			clear_mustq(lock);
 		yield_to_preempted(owner, yield_count);
+		if (mustq)
+			set_mustq(lock);
 		/* Don't relax if we yielded. Maybe we should? */
 		return;
 	}
 relax:
 	cpu_relax();
 }
 
+static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+{
+	__yield_to_locked_owner(lock, val, paravirt, false);
+}
+
+static __always_inline void yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+{
+	bool mustq = false;
+
+	if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal)
+		mustq = true;
+
+	__yield_to_locked_owner(lock, val, paravirt, mustq);
+}
+
 static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
 {
 	int prev_cpu = decode_tail_cpu(val);
@@ -211,7 +247,7 @@ static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *
 	if ((yield_count & 1) == 0)
 		goto relax; /* owner vcpu is running */
 
-	smp_rmb(); /* See yield_to_locked_owner comment */
+	smp_rmb(); /* See __yield_to_locked_owner comment */
 
 	if (!node->locked) {
 		yield_to_preempted(prev_cpu, yield_count);
@@ -308,7 +344,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 		if (!(val & _Q_LOCKED_VAL))
 			break;
 
-		yield_to_locked_owner(lock, val, paravirt);
+		yield_head_to_locked_owner(lock, val, paravirt);
 		if (!maybe_stealers)
 			continue;
 		iters++;
@@ -444,6 +480,22 @@ static int pv_yield_owner_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n");
 
+static int pv_yield_allow_steal_set(void *data, u64 val)
+{
+	pv_yield_allow_steal = !!val;
+
+	return 0;
+}
+
+static int pv_yield_allow_steal_get(void *data, u64 *val)
+{
+	*val = pv_yield_allow_steal;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n");
+
 static int pv_yield_prev_set(void *data, u64 val)
 {
 	pv_yield_prev = !!val;
@@ -466,6 +518,7 @@ static __init int spinlock_debugfs_init(void)
 	debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
 	if (is_shared_processor()) {
 		debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
+		debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal);
 		debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
 	}
 
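Because pv_yield_allow_steal defaults to false, this commit changes no behaviour on its own; on shared-processor systems the new behaviour can be tried at runtime by writing 1 to the qspl_pv_yield_allow_steal file created above (under the powerpc arch debugfs directory, typically /sys/kernel/debug/powerpc/ when debugfs is mounted).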