Skip to content

Commit 25c00c5

Browse files
committed
mm, slub: use migrate_disable() on PREEMPT_RT
We currently use preempt_disable() (directly or via get_cpu_ptr()) to stabilize the pointer to kmem_cache_cpu. On PREEMPT_RT this would be incompatible with the list_lock spinlock. We can use migrate_disable() instead, but that increases overhead on !PREEMPT_RT as it's an unconditional function call.

In order to get the best available mechanism on both PREEMPT_RT and !PREEMPT_RT, introduce private slub_get_cpu_ptr() and slub_put_cpu_ptr() wrappers and use them.

Signed-off-by: Vlastimil Babka <[email protected]>
1 parent e0a043a commit 25c00c5

File tree

1 file changed

+30
-9
lines changed

mm/slub.c

Lines changed: 30 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,26 @@
118118
* the fast path and disables lockless freelists.
119119
*/
120120

121+
/*
122+
* We could simply use migrate_disable()/enable() but as long as it's a
123+
* function call even on !PREEMPT_RT, use inline preempt_disable() there.
124+
*/
125+
#ifndef CONFIG_PREEMPT_RT
126+
#define slub_get_cpu_ptr(var) get_cpu_ptr(var)
127+
#define slub_put_cpu_ptr(var) put_cpu_ptr(var)
128+
#else
129+
#define slub_get_cpu_ptr(var) \
130+
({ \
131+
migrate_disable(); \
132+
this_cpu_ptr(var); \
133+
})
134+
#define slub_put_cpu_ptr(var) \
135+
do { \
136+
(void)(var); \
137+
migrate_enable(); \
138+
} while (0)
139+
#endif
140+
121141
#ifdef CONFIG_SLUB_DEBUG
122142
#ifdef CONFIG_SLUB_DEBUG_ON
123143
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
@@ -2852,7 +2872,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
28522872
if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
28532873
goto deactivate_slab;
28542874

2855-
/* must check again c->page in case IRQ handler changed it */
2875+
/* must check again c->page in case we got preempted and it changed */
28562876
local_irq_save(flags);
28572877
if (unlikely(page != c->page)) {
28582878
local_irq_restore(flags);
@@ -2911,7 +2931,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
29112931
}
29122932
if (unlikely(!slub_percpu_partial(c))) {
29132933
local_irq_restore(flags);
2914-
goto new_objects; /* stolen by an IRQ handler */
2934+
/* we were preempted and partial list got empty */
2935+
goto new_objects;
29152936
}
29162937

29172938
page = c->page = slub_percpu_partial(c);
@@ -2927,9 +2948,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
29272948
if (freelist)
29282949
goto check_new_page;
29292950

2930-
put_cpu_ptr(s->cpu_slab);
2951+
slub_put_cpu_ptr(s->cpu_slab);
29312952
page = new_slab(s, gfpflags, node);
2932-
c = get_cpu_ptr(s->cpu_slab);
2953+
c = slub_get_cpu_ptr(s->cpu_slab);
29332954

29342955
if (unlikely(!page)) {
29352956
slab_out_of_memory(s, gfpflags, node);
@@ -3012,12 +3033,12 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
30123033
* cpu before disabling preemption. Need to reload cpu area
30133034
* pointer.
30143035
*/
3015-
c = get_cpu_ptr(s->cpu_slab);
3036+
c = slub_get_cpu_ptr(s->cpu_slab);
30163037
#endif
30173038

30183039
p = ___slab_alloc(s, gfpflags, node, addr, c);
30193040
#ifdef CONFIG_PREEMPT_COUNT
3020-
put_cpu_ptr(s->cpu_slab);
3041+
slub_put_cpu_ptr(s->cpu_slab);
30213042
#endif
30223043
return p;
30233044
}
@@ -3546,7 +3567,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
35463567
* IRQs, which protects against PREEMPT and interrupts
35473568
* handlers invoking normal fastpath.
35483569
*/
3549-
c = get_cpu_ptr(s->cpu_slab);
3570+
c = slub_get_cpu_ptr(s->cpu_slab);
35503571
local_irq_disable();
35513572

35523573
for (i = 0; i < size; i++) {
@@ -3592,7 +3613,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
35923613
}
35933614
c->tid = next_tid(c->tid);
35943615
local_irq_enable();
3595-
put_cpu_ptr(s->cpu_slab);
3616+
slub_put_cpu_ptr(s->cpu_slab);
35963617

35973618
/*
35983619
* memcg and kmem_cache debug support and memory initialization.
@@ -3602,7 +3623,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
36023623
slab_want_init_on_alloc(flags, s));
36033624
return i;
36043625
error:
3605-
put_cpu_ptr(s->cpu_slab);
3626+
slub_put_cpu_ptr(s->cpu_slab);
36063627
slab_post_alloc_hook(s, objcg, flags, i, p, false);
36073628
__kmem_cache_free_bulk(s, i, p);
36083629
return 0;

0 commit comments

Comments
 (0)