  * the fast path and disables lockless freelists.
  */
 
+/*
+ * We could simply use migrate_disable()/enable() but as long as it's a
+ * function call even on !PREEMPT_RT, use inline preempt_disable() there.
+ */
+#ifndef CONFIG_PREEMPT_RT
+#define slub_get_cpu_ptr(var)   get_cpu_ptr(var)
+#define slub_put_cpu_ptr(var)   put_cpu_ptr(var)
+#else
+#define slub_get_cpu_ptr(var)           \
+({                                      \
+        migrate_disable();              \
+        this_cpu_ptr(var);              \
+})
+#define slub_put_cpu_ptr(var)           \
+do {                                    \
+        (void)(var);                    \
+        migrate_enable();               \
+} while (0)
+#endif
+
 #ifdef CONFIG_SLUB_DEBUG
 #ifdef CONFIG_SLUB_DEBUG_ON
 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
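For readability, here is an illustrative sketch (not part of the patch; the function name is made up) of how the new pair is meant to be used. It mirrors what __slab_alloc() does after this change, with the CONFIG_PREEMPT_COUNT guards omitted: on !PREEMPT_RT the wrappers disable preemption just like get_cpu_ptr()/put_cpu_ptr(), while on PREEMPT_RT they only disable migration, so the slow path can be preempted but keeps operating on the same CPU's kmem_cache_cpu.

/*
 * Illustrative only -- not part of the patch. Mirrors __slab_alloc()
 * (shown in a later hunk) without the CONFIG_PREEMPT_COUNT guards.
 */
static void *sketch_slab_alloc(struct kmem_cache *s, gfp_t gfpflags,
                               int node, unsigned long addr)
{
        struct kmem_cache_cpu *c;
        void *p;

        c = slub_get_cpu_ptr(s->cpu_slab);      /* !RT: disable preemption; RT: disable migration */
        p = ___slab_alloc(s, gfpflags, node, addr, c);
        slub_put_cpu_ptr(s->cpu_slab);          /* re-enable preemption/migration */

        return p;
}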
@@ -2852,7 +2872,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
         if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
                 goto deactivate_slab;
 
-        /* must check again c->page in case IRQ handler changed it */
+        /* must check again c->page in case we got preempted and it changed */
         local_irq_save(flags);
         if (unlikely(page != c->page)) {
                 local_irq_restore(flags);
@@ -2911,7 +2931,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
         }
         if (unlikely(!slub_percpu_partial(c))) {
                 local_irq_restore(flags);
-                goto new_objects; /* stolen by an IRQ handler */
+                /* we were preempted and partial list got empty */
+                goto new_objects;
         }
 
         page = c->page = slub_percpu_partial(c);
@@ -2927,9 +2948,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
         if (freelist)
                 goto check_new_page;
 
-        put_cpu_ptr(s->cpu_slab);
+        slub_put_cpu_ptr(s->cpu_slab);
         page = new_slab(s, gfpflags, node);
-        c = get_cpu_ptr(s->cpu_slab);
+        c = slub_get_cpu_ptr(s->cpu_slab);
 
         if (unlikely(!page)) {
                 slab_out_of_memory(s, gfpflags, node);
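The hunk above drops the per-cpu reference around new_slab() because the page allocator may sleep; once slub_put_cpu_ptr() has run, the task can be preempted or migrated, so c has to be reloaded from slub_get_cpu_ptr() rather than reused. A minimal sketch of that pattern, pulled out into a hypothetical helper purely for illustration:

/*
 * Illustrative only -- not part of the patch. Shows why 'c' is reassigned
 * after the sleeping allocation: the old pointer may belong to a CPU the
 * task is no longer running on.
 */
static struct page *sketch_alloc_new_slab(struct kmem_cache *s, gfp_t gfpflags,
                                          int node, struct kmem_cache_cpu **pc)
{
        struct page *page;

        slub_put_cpu_ptr(s->cpu_slab);          /* new_slab() may sleep; release the pin */
        page = new_slab(s, gfpflags, node);
        *pc = slub_get_cpu_ptr(s->cpu_slab);    /* reload: possibly a different CPU now */

        return page;
}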
@@ -3012,12 +3033,12 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
          * cpu before disabling preemption. Need to reload cpu area
          * pointer.
          */
-        c = get_cpu_ptr(s->cpu_slab);
+        c = slub_get_cpu_ptr(s->cpu_slab);
 #endif
 
         p = ___slab_alloc(s, gfpflags, node, addr, c);
 #ifdef CONFIG_PREEMPT_COUNT
-        put_cpu_ptr(s->cpu_slab);
+        slub_put_cpu_ptr(s->cpu_slab);
 #endif
         return p;
 }
@@ -3546,7 +3567,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
          * IRQs, which protects against PREEMPT and interrupts
          * handlers invoking normal fastpath.
          */
-        c = get_cpu_ptr(s->cpu_slab);
+        c = slub_get_cpu_ptr(s->cpu_slab);
         local_irq_disable();
 
         for (i = 0; i < size; i++) {
@@ -3592,7 +3613,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
         }
         c->tid = next_tid(c->tid);
         local_irq_enable();
-        put_cpu_ptr(s->cpu_slab);
+        slub_put_cpu_ptr(s->cpu_slab);
 
         /*
          * memcg and kmem_cache debug support and memory initialization.
@@ -3602,7 +3623,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                 slab_want_init_on_alloc(flags, s));
         return i;
 error:
-        put_cpu_ptr(s->cpu_slab);
+        slub_put_cpu_ptr(s->cpu_slab);
         slab_post_alloc_hook(s, objcg, flags, i, p, false);
         __kmem_cache_free_bulk(s, i, p);
         return 0;
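Taken together, the three kmem_cache_alloc_bulk() hunks bracket the whole bulk loop with the new pair: the per-cpu reference is taken before interrupts are disabled and dropped after they are re-enabled, on both the success and the error path. A simplified skeleton, for illustration only (the real object-fetch loop, slow-path refill, and the debug/memcg hooks are omitted; the function name is made up):

/*
 * Illustrative skeleton of kmem_cache_alloc_bulk() after this patch;
 * not the real implementation.
 */
static int sketch_bulk_alloc(struct kmem_cache *s, gfp_t flags,
                             size_t size, void **p)
{
        struct kmem_cache_cpu *c;
        size_t i;

        c = slub_get_cpu_ptr(s->cpu_slab);      /* pin per-CPU state for the whole loop */
        local_irq_disable();                    /* IRQs off also fences off the normal fastpath */

        for (i = 0; i < size; i++) {
                /* ... take an object from c->freelist (or the slow path) and store it in p[i] ... */
        }

        c->tid = next_tid(c->tid);              /* force racing fastpaths to retry */
        local_irq_enable();
        slub_put_cpu_ptr(s->cpu_slab);          /* the error path drops the pin the same way */

        return i;
}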