Skip to content

Commit 5e4823e

Browse files
committed
Merge tag 'locking_urgent_for_v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fix from Borislav Petkov:

 - Avoid rwsem lockups in certain situations when handling the handoff bit

* tag 'locking_urgent_for_v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Allow slowpath writer to ignore handoff bit if not set by first waiter
2 parents cd2715b + 6eebd5f commit 5e4823e

File tree

1 file changed

+20
-10
lines changed

1 file changed

+20
-10
lines changed

kernel/locking/rwsem.c

Lines changed: 20 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -335,8 +335,6 @@ struct rwsem_waiter {
335335
struct task_struct *task;
336336
enum rwsem_waiter_type type;
337337
unsigned long timeout;
338-
339-
/* Writer only, not initialized in reader */
340338
bool handoff_set;
341339
};
342340
#define rwsem_first_waiter(sem) \
@@ -459,10 +457,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
459457
* to give up the lock), request a HANDOFF to
460458
* force the issue.
461459
*/
462-
if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
463-
time_after(jiffies, waiter->timeout)) {
464-
adjustment -= RWSEM_FLAG_HANDOFF;
465-
lockevent_inc(rwsem_rlock_handoff);
460+
if (time_after(jiffies, waiter->timeout)) {
461+
if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
462+
adjustment -= RWSEM_FLAG_HANDOFF;
463+
lockevent_inc(rwsem_rlock_handoff);
464+
}
465+
waiter->handoff_set = true;
466466
}
467467

468468
atomic_long_add(-adjustment, &sem->count);
@@ -599,7 +599,7 @@ rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
599599
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
600600
struct rwsem_waiter *waiter)
601601
{
602-
bool first = rwsem_first_waiter(sem) == waiter;
602+
struct rwsem_waiter *first = rwsem_first_waiter(sem);
603603
long count, new;
604604

605605
lockdep_assert_held(&sem->wait_lock);
@@ -609,11 +609,20 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
609609
bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
610610

611611
if (has_handoff) {
612-
if (!first)
612+
/*
613+
* Honor handoff bit and yield only when the first
614+
* waiter is the one that set it. Otherwise, we
615+
* still try to acquire the rwsem.
616+
*/
617+
if (first->handoff_set && (waiter != first))
613618
return false;
614619

615-
/* First waiter inherits a previously set handoff bit */
616-
waiter->handoff_set = true;
620+
/*
621+
* First waiter can inherit a previously set handoff
622+
* bit and spin on rwsem if lock acquisition fails.
623+
*/
624+
if (waiter == first)
625+
waiter->handoff_set = true;
617626
}
618627

619628
new = count;
@@ -1027,6 +1036,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
10271036
waiter.task = current;
10281037
waiter.type = RWSEM_WAITING_FOR_READ;
10291038
waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
1039+
waiter.handoff_set = false;
10301040

10311041
raw_spin_lock_irq(&sem->wait_lock);
10321042
if (list_empty(&sem->wait_list)) {

0 commit comments

Comments
 (0)