Commit 342a932

KAGA-KOKO authored and Ingo Molnar committed

locking/spinlock: Provide RT variant header: <linux/spinlock_rt.h>

Provide the necessary wrappers around the actual rtmutex based spinlock
implementation.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 051790e commit 342a932
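
For context, a minimal sketch of what these wrappers preserve: code written against the generic spinlock_t API compiles unchanged on both configurations. The lock, counter and function below are hypothetical, and the sketch assumes the RT-aware definitions of spinlock_t and DEFINE_SPINLOCK provided elsewhere in this patch series.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static unsigned long demo_events;	/* hypothetical shared state */

static void demo_record_event(void)
{
	unsigned long flags;

	/*
	 * On PREEMPT_RT=y this acquires the rtmutex-based lock and leaves
	 * interrupts enabled; "flags" is merely set to 0 by the
	 * spin_lock_irqsave() wrapper in spinlock_rt.h below. On
	 * PREEMPT_RT=n it takes the raw spinlock with interrupts disabled.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	demo_events++;
	spin_unlock_irqrestore(&demo_lock, flags);
}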

File tree

3 files changed: +162 -1 lines changed

include/linux/spinlock.h

Lines changed: 10 additions & 1 deletion
@@ -312,8 +312,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 	 1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-/* Include rwlock functions */
+#ifndef CONFIG_PREEMPT_RT
+/* Include rwlock functions for !RT */
 #include <linux/rwlock.h>
+#endif
 
 /*
  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -324,6 +326,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 # include <linux/spinlock_api_up.h>
 #endif
 
+/* Non PREEMPT_RT kernel, map to raw spinlocks: */
+#ifndef CONFIG_PREEMPT_RT
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
@@ -458,6 +463,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
 
 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
 
+#else /* !CONFIG_PREEMPT_RT */
+# include <linux/spinlock_rt.h>
+#endif /* CONFIG_PREEMPT_RT */
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)

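The net effect of the three hunks above is a single configuration switch in include/linux/spinlock.h; roughly (a structural sketch, not the verbatim header):

#ifndef CONFIG_PREEMPT_RT
/* ... spin_lock() and friends map to the raw_spinlock_t variants ... */
#else /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>	/* rtmutex-based spinlock_t wrappers */
#endif /* CONFIG_PREEMPT_RT */
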
include/linux/spinlock_api_smp.h

Lines changed: 3 additions & 0 deletions
@@ -187,6 +187,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
 	return 0;
 }
 
+/* PREEMPT_RT has its own rwlock implementation */
+#ifndef CONFIG_PREEMPT_RT
 #include <linux/rwlock_api_smp.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_API_SMP_H */

include/linux/spinlock_rt.h

Lines changed: 149 additions & 0 deletions
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+				struct lock_class_key *key);
+#else
+static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+				       struct lock_class_key *key)
+{
+}
+#endif
+
+#define spin_lock_init(slock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key);		\
+} while (0)
+
+extern void rt_spin_lock(spinlock_t *lock);
+extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
+extern void rt_spin_unlock(spinlock_t *lock);
+extern void rt_spin_lock_unlock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock);
+extern int rt_spin_trylock(spinlock_t *lock);
+
+static __always_inline void spin_lock(spinlock_t *lock)
+{
+	rt_spin_lock(lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+# define __spin_lock_nested(lock, subclass)				\
+	rt_spin_lock_nested(lock, subclass)
+
+# define __spin_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
+	rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
+} while (0)
+# define __spin_lock_irqsave_nested(lock, flags, subclass)		\
+do {									\
+	typecheck(unsigned long, flags);				\
+	flags = 0;							\
+	__spin_lock_nested(lock, subclass);				\
+} while (0)
+
+#else
+/*
+ * Always evaluate the 'subclass' argument to avoid that the compiler
+ * warns about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define __spin_lock_nested(lock, subclass)	spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_nest_lock(lock, subclass)	spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
+	spin_lock_irqsave(((void)(subclass), (lock)), flags)
+#endif
+
+#define spin_lock_nested(lock, subclass)		\
+	__spin_lock_nested(lock, subclass)
+
+#define spin_lock_nest_lock(lock, nest_lock)		\
+	__spin_lock_nest_lock(lock, nest_lock)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)	\
+	__spin_lock_irqsave_nested(lock, flags, subclass)
+
+static __always_inline void spin_lock_bh(spinlock_t *lock)
+{
+	/* Investigate: Drop bh when blocking ? */
+	local_bh_disable();
+	rt_spin_lock(lock);
+}
+
+static __always_inline void spin_lock_irq(spinlock_t *lock)
+{
+	rt_spin_lock(lock);
+}
+
+#define spin_lock_irqsave(lock, flags)			 \
+do {							 \
+	typecheck(unsigned long, flags);		 \
+	flags = 0;					 \
+	spin_lock(lock);				 \
+} while (0)
+
+static __always_inline void spin_unlock(spinlock_t *lock)
+{
+	rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
+{
+	rt_spin_unlock(lock);
+	local_bh_enable();
+}
+
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
+{
+	rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
+						   unsigned long flags)
+{
+	rt_spin_unlock(lock);
+}
+
+#define spin_trylock(lock)				\
+	__cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock_bh(lock)				\
+	__cond_lock(lock, rt_spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock)				\
+	__cond_lock(lock, rt_spin_trylock(lock))
+
+#define __spin_trylock_irqsave(lock, flags)		\
+({							\
+	int __locked;					\
+							\
+	typecheck(unsigned long, flags);		\
+	flags = 0;					\
+	__locked = spin_trylock(lock);			\
+	__locked;					\
+})
+
+#define spin_trylock_irqsave(lock, flags)		\
+	__cond_lock(lock, __spin_trylock_irqsave(lock, flags))
+
+#define spin_is_contended(lock)		(((void)(lock), 0))
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return rt_mutex_base_is_locked(&lock->lock);
+}
+
+#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
+
+#endif
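
A hedged usage sketch of the API defined above (the bucket locks and functions are hypothetical): locks initialized at a single spin_lock_init() call site share the static lock_class_key that the macro defines, so taking two of them at once needs spin_lock_nested(). With CONFIG_LOCKDEP=y the subclass reaches rt_spin_lock_nested(); with lockdep disabled the macros still evaluate the subclass argument so W=1 builds stay warning-free, as the comment in the header notes.

static spinlock_t bucket_lock[2];	/* hypothetical shared buckets */

static void demo_init(void)
{
	int i;

	/* One spin_lock_init() call site: both locks share a lockdep class. */
	for (i = 0; i < 2; i++)
		spin_lock_init(&bucket_lock[i]);
}

static void demo_transfer(void)
{
	spin_lock(&bucket_lock[0]);
	/* Second lock of the same class: annotate the nesting level. */
	spin_lock_nested(&bucket_lock[1], SINGLE_DEPTH_NESTING);
	/* ... move data between the two buckets ... */
	spin_unlock(&bucket_lock[1]);
	spin_unlock(&bucket_lock[0]);
}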
