
Commit d1d71b3

Author: Kent Overstreet
Committed by: Kent Overstreet

sched.h: Move (spin|rwlock)_needbreak() to spinlock.h

This lets us kill sched.h's dependency on spinlock.h.

Signed-off-by: Kent Overstreet <[email protected]>

1 parent d7a73e3 · commit d1d71b3
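
For context on what is being moved: spin_needbreak() lets a lock holder voluntarily break up a long critical section when another task is spinning on the lock. A minimal sketch of the usual caller pattern, with a hypothetical do_one_item() work helper (illustrative only, not part of this commit):

	static void process_items(spinlock_t *lock, struct list_head *items)
	{
		spin_lock(lock);
		while (!list_empty(items)) {
			do_one_item(items);	/* hypothetical work helper */
			/* Drop and retake the lock if someone is waiting. */
			if (spin_needbreak(lock) || need_resched()) {
				spin_unlock(lock);
				cond_resched();
				spin_lock(lock);
			}
		}
		spin_unlock(lock);
	}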

File tree

include/linux/sched.h
include/linux/spinlock.h

2 files changed: +31 -31 lines changed


include/linux/sched.h

Lines changed: 0 additions & 31 deletions
@@ -2227,37 +2227,6 @@ static inline bool preempt_model_preemptible(void)
 	return preempt_model_full() || preempt_model_rt();
 }
 
-/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
- */
-static inline int spin_needbreak(spinlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
-	return spin_is_contended(lock);
-#else
-	return 0;
-#endif
-}
-
-/*
- * Check if a rwlock is contended.
- * Returns non-zero if there is another task waiting on the rwlock.
- * Returns zero if the lock is not contended or the system / underlying
- * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
- */
-static inline int rwlock_needbreak(rwlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
-	return rwlock_is_contended(lock);
-#else
-	return 0;
-#endif
-}
-
 static __always_inline bool need_resched(void)
 {
 	return unlikely(tif_need_resched());
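
The rwlock variant serves the same purpose for writer-side critical sections. A hedged sketch of the analogous pattern (the lock name and work loop are illustrative, not from this commit):

	static DEFINE_RWLOCK(example_lock);

	static void writer_work(void)
	{
		write_lock(&example_lock);
		while (have_more_work()) {	/* hypothetical condition */
			do_work();		/* hypothetical helper */
			if (rwlock_needbreak(&example_lock)) {
				write_unlock(&example_lock);
				cpu_relax();
				write_lock(&example_lock);
			}
		}
		write_unlock(&example_lock);
	}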

include/linux/spinlock.h

Lines changed: 31 additions & 0 deletions
@@ -449,6 +449,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
 	return raw_spin_is_contended(&lock->rlock);
 }
 
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
+ * but a general need for low latency)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+	return spin_is_contended(lock);
+#else
+	return 0;
+#endif
+}
+
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+	return rwlock_is_contended(lock);
+#else
+	return 0;
+#endif
+}
+
 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
 
 #else /* !CONFIG_PREEMPT_RT */
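
The payoff is in the include graph: with the helpers living in spinlock.h, code that wants them depends only on spinlock.h, and per the commit message a follow-up can then drop sched.h's include of spinlock.h. An illustrative user (hypothetical file, not part of this commit):

	/* example_driver.c -- hypothetical user of the moved helpers */
	#include <linux/spinlock.h>	/* now sufficient for spin_needbreak() */

	static DEFINE_SPINLOCK(example_lock);

	static bool example_should_yield(void)
	{
		return spin_needbreak(&example_lock);
	}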
