
Commit a2b4ae8

mm, slub: make slab_lock() disable irqs with PREEMPT_RT
We need to disable irqs around slab_lock() (a bit spinlock) to make it irq-safe. Most calls to slab_lock() are nested under spin_lock_irqsave() which doesn't disable irqs on PREEMPT_RT, so add explicit disabling with PREEMPT_RT. The exception is cmpxchg_double_slab() which already disables irqs, so use a __slab_[un]lock() variant without irq disable there.

slab_[un]lock() thus needs a flags pointer parameter, which is unused on !RT.

free_debug_processing() now has two flags variables, which looks odd, but only one is actually used - the one used in spin_lock_irqsave() on !RT and the one used in slab_lock() on RT.

As a result, __cmpxchg_double_slab() and cmpxchg_double_slab() become effectively identical on RT, as both will disable irqs, which is necessary on RT as most callers of this function also rely on irqsaving lock operations. Thus, assert that irqs are already disabled in __cmpxchg_double_slab() only on !RT and also change the VM_BUG_ON assertion to the more standard lockdep_assert one.

Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 94ef030 commit a2b4ae8
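
The calling convention described in the message can be illustrated with a minimal sketch (not part of the patch; the caller and its body are hypothetical, only slab_lock()/slab_unlock() and the flags convention come from this commit):

	/* Hypothetical caller showing the slab_lock()/slab_unlock() pattern. */
	static void example_user(struct kmem_cache *s, struct page *page)
	{
		/* Unused on !PREEMPT_RT; initialized to avoid "maybe uninitialized" warnings. */
		unsigned long flags = 0;

		slab_lock(page, &flags);	/* bit spinlock; also disables irqs on PREEMPT_RT */
		/* ... manipulate page->freelist / page->counters ... */
		slab_unlock(page, &flags);	/* drops the lock; restores irqs on PREEMPT_RT */
	}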

1 file changed, 41 insertions(+), 17 deletions(-)

mm/slub.c

Lines changed: 41 additions & 17 deletions
@@ -359,25 +359,44 @@ static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void slab_lock(struct page *page)
+static __always_inline void __slab_lock(struct page *page)
 {
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void slab_unlock(struct page *page)
+static __always_inline void __slab_unlock(struct page *page)
 {
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
-/* Interrupts must be disabled (for the fallback code to work right) */
+static __always_inline void slab_lock(struct page *page, unsigned long *flags)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_save(*flags);
+	__slab_lock(page);
+}
+
+static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
+{
+	__slab_unlock(page);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_restore(*flags);
+}
+
+/*
+ * Interrupts must be disabled (for the fallback code to work right), typically
+ * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
+ * so we disable interrupts as part of slab_[un]lock().
+ */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_old, unsigned long counters_old,
 		void *freelist_new, unsigned long counters_new,
 		const char *n)
 {
-	VM_BUG_ON(!irqs_disabled());
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		lockdep_assert_irqs_disabled();
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
 	if (s->flags & __CMPXCHG_DOUBLE) {
@@ -388,15 +407,18 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 	} else
 #endif
 	{
-		slab_lock(page);
+		/* init to 0 to prevent spurious warnings */
+		unsigned long flags = 0;
+
+		slab_lock(page, &flags);
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
-			slab_unlock(page);
+			slab_unlock(page, &flags);
 			return true;
 		}
-		slab_unlock(page);
+		slab_unlock(page, &flags);
 	}
 
 	cpu_relax();
@@ -427,16 +449,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		unsigned long flags;
 
 		local_irq_save(flags);
-		slab_lock(page);
+		__slab_lock(page);
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
-			slab_unlock(page);
+			__slab_unlock(page);
 			local_irq_restore(flags);
 			return true;
 		}
-		slab_unlock(page);
+		__slab_unlock(page);
 		local_irq_restore(flags);
 	}
 
@@ -1269,11 +1291,11 @@ static noinline int free_debug_processing(
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 	void *object = head;
 	int cnt = 0;
-	unsigned long flags;
+	unsigned long flags, flags2;
 	int ret = 0;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	slab_lock(page);
+	slab_lock(page, &flags2);
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
 		if (!check_slab(s, page))
@@ -1306,7 +1328,7 @@ static noinline int free_debug_processing(
 		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
 			 bulk_cnt, cnt);
 
-	slab_unlock(page);
+	slab_unlock(page, &flags2);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
@@ -4087,11 +4109,12 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 {
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
+	unsigned long flags;
 	unsigned long *map;
 	void *p;
 
 	slab_err(s, page, text, s->name);
-	slab_lock(page);
+	slab_lock(page, &flags);
 
 	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
@@ -4102,7 +4125,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		}
 	}
 	put_map(map);
-	slab_unlock(page);
+	slab_unlock(page, &flags);
 #endif
 }
 
@@ -4834,8 +4857,9 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
 {
 	void *p;
 	void *addr = page_address(page);
+	unsigned long flags;
 
-	slab_lock(page);
+	slab_lock(page, &flags);
 
 	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
 		goto unlock;
@@ -4850,7 +4874,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
 			break;
 		}
 unlock:
-	slab_unlock(page);
+	slab_unlock(page, &flags);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
