@@ -414,17 +414,54 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 }
 
 /*
- * Return true only if we can still spin on the owner field of the rwsem.
+ * The rwsem_spin_on_owner() function returns the following 4 values
+ * depending on the lock owner state.
+ *   OWNER_NULL  : owner is currently NULL
+ *   OWNER_WRITER: when owner changes and is a writer
+ *   OWNER_READER: when owner changes and the new owner may be a reader.
+ *   OWNER_NONSPINNABLE:
+ *		   when optimistic spinning has to stop because either the
+ *		   owner stops running, is unknown, or its timeslice has
+ *		   been used up.
  */
-static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
+enum owner_state {
+	OWNER_NULL		= 1 << 0,
+	OWNER_WRITER		= 1 << 1,
+	OWNER_READER		= 1 << 2,
+	OWNER_NONSPINNABLE	= 1 << 3,
+};
+#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER)
+
+static inline enum owner_state rwsem_owner_state(unsigned long owner)
 {
-	struct task_struct *owner = READ_ONCE(sem->owner);
+	if (!owner)
+		return OWNER_NULL;
 
-	if (!is_rwsem_owner_spinnable(owner))
-		return false;
+	if (owner & RWSEM_ANONYMOUSLY_OWNED)
+		return OWNER_NONSPINNABLE;
+
+	if (owner & RWSEM_READER_OWNED)
+		return OWNER_READER;
+
+	return OWNER_WRITER;
+}
+
+static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
+{
+	struct task_struct *tmp, *owner = READ_ONCE(sem->owner);
+	enum owner_state state = rwsem_owner_state((unsigned long)owner);
+
+	if (state != OWNER_WRITER)
+		return state;
 
 	rcu_read_lock();
-	while (owner && (READ_ONCE(sem->owner) == owner)) {
+	for (;;) {
+		tmp = READ_ONCE(sem->owner);
+		if (tmp != owner) {
+			state = rwsem_owner_state((unsigned long)tmp);
+			break;
+		}
+
 		/*
 		 * Ensure we emit the owner->on_cpu, dereference _after_
 		 * checking sem->owner still matches owner, if that fails,
@@ -433,24 +470,16 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 		 */
 		barrier();
 
-		/*
-		 * abort spinning when need_resched or owner is not running or
-		 * owner's cpu is preempted.
-		 */
 		if (need_resched() || !owner_on_cpu(owner)) {
-			rcu_read_unlock();
-			return false;
+			state = OWNER_NONSPINNABLE;
+			break;
 		}
 
 		cpu_relax();
 	}
 	rcu_read_unlock();
 
-	/*
-	 * If there is a new owner or the owner is not set, we continue
-	 * spinning.
-	 */
-	return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
+	return state;
 }
 
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
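Side note on the enum introduced above: each owner_state value is a distinct power-of-two bit, so OWNER_SPINNABLE can name the two states worth spinning on (no owner yet, or a running writer) and a caller can test for either with a single mask. Below is a minimal standalone sketch of that decoding, not the kernel code; the two owner-word tag bits are stand-in values for illustration, while the real RWSEM_READER_OWNED and RWSEM_ANONYMOUSLY_OWNED definitions live elsewhere in the kernel's rwsem sources.

#include <stdio.h>

/* Power-of-two owner states, mirroring the enum added by this patch. */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER)

/* Assumed stand-ins for the kernel's owner-word tag bits. */
#define READER_OWNED_BIT	(1UL << 0)
#define ANONYMOUSLY_OWNED_BIT	(1UL << 1)

/* Same decision order as the patched rwsem_owner_state(). */
static enum owner_state owner_state(unsigned long owner)
{
	if (!owner)
		return OWNER_NULL;
	if (owner & ANONYMOUSLY_OWNED_BIT)
		return OWNER_NONSPINNABLE;
	if (owner & READER_OWNED_BIT)
		return OWNER_READER;
	return OWNER_WRITER;
}

int main(void)
{
	const unsigned long samples[] = {
		0x0,					/* no owner */
		0x1000UL,				/* plain task pointer: writer */
		0x1000UL | READER_OWNED_BIT,		/* reader-owned */
		0x1000UL | ANONYMOUSLY_OWNED_BIT,	/* anonymously owned */
	};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		enum owner_state s = owner_state(samples[i]);

		printf("owner=%#lx state=%#x spinnable=%s\n",
		       samples[i], (unsigned int)s,
		       (s & OWNER_SPINNABLE) ? "yes" : "no");
	}
	return 0;
}

Compiled on its own, the sketch prints one line per sample owner word and shows that only the NULL and writer cases report spinnable.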
@@ -473,7 +502,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	 * 2) readers own the lock as we can't determine if they are
 	 *    actively running or not.
 	 */
-	while (rwsem_spin_on_owner(sem)) {
+	while (rwsem_spin_on_owner(sem) & OWNER_SPINNABLE) {
 		/*
 		 * Try to acquire the lock
 		 */
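On the caller side changed in this last hunk, the single & OWNER_SPINNABLE test keeps the spin loop going for both OWNER_NULL and OWNER_WRITER without chaining equality checks. A hedged sketch of that loop shape follows, using a hypothetical fake_spin_on_owner() that returns a canned state sequence rather than watching a real lock:

#include <stdio.h>

enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER)

/*
 * Stand-in for rwsem_spin_on_owner(): returns a canned sequence of owner
 * states instead of observing a real semaphore.
 */
static enum owner_state fake_spin_on_owner(void)
{
	static const enum owner_state seq[] = {
		OWNER_WRITER, OWNER_NULL, OWNER_READER,
	};
	static unsigned int i;

	return seq[i++];
}

int main(void)
{
	/*
	 * Same shape as the patched rwsem_optimistic_spin() loop: keep
	 * spinning while the owner is absent or a running writer, and
	 * stop as soon as a reader or a non-spinnable owner shows up.
	 */
	while (fake_spin_on_owner() & OWNER_SPINNABLE)
		printf("owner still spinnable, retry the trylock\n");

	printf("owner not spinnable, fall back to queueing\n");
	return 0;
}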