
Commit 5a836bf

Sebastian Andrzej Siewior authored and tehcaster committed
mm: slub: move flush_cpu_slab() invocations __free_slab() invocations out of IRQ context
flush_all() flushes a specific SLAB cache on each CPU (where the cache is present). The deactivate_slab()/__free_slab() invocations happen within an IPI handler, which is problematic for PREEMPT_RT.

The flush operation is not a frequent operation or a hot path, so the per-CPU flush can be moved into a workqueue.

Because a workqueue handler, unlike an IPI handler, does not run with irqs disabled, flush_slab() now has to disable them itself while working with the kmem_cache_cpu fields. deactivate_slab() is safe to call with irqs enabled.

[[email protected]: adapt to new SLUB changes]

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
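For orientation, the replacement is a two-pass "queue on each CPU, then wait" pattern over per-CPU work items. The sketch below is illustrative only, not the patch itself; pcpu_flush_work, pcpu_flush_fn, cpu_needs_flush and flush_everywhere are hypothetical names standing in for slub_flush_work, flush_cpu_slab, has_cpu_slab and flush_all_cpus_locked:

#include <linux/cpu.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

/* Hypothetical per-CPU work item, mirroring struct slub_flush_work. */
struct pcpu_flush_work {
        struct work_struct work;
        bool skip;
};

static DEFINE_PER_CPU(struct pcpu_flush_work, pcpu_flush);

/* Stand-in for has_cpu_slab(): does this CPU have anything to flush? */
static bool cpu_needs_flush(unsigned int cpu)
{
        return true;
}

static void pcpu_flush_fn(struct work_struct *w)
{
        unsigned long flags;

        /*
         * A work handler, unlike an IPI handler, runs with irqs enabled,
         * so per-CPU fields also touched from irq context must be read
         * and cleared under local_irq_save() -- the reason flush_slab()
         * gains irq disabling in this commit.
         */
        local_irq_save(flags);
        /* ... snapshot and clear the per-CPU state here ... */
        local_irq_restore(flags);
}

static void flush_everywhere(void)
{
        struct pcpu_flush_work *fw;
        unsigned int cpu;

        cpus_read_lock();               /* keep the online CPU mask stable */

        /* Pass 1: queue one work item per CPU that needs flushing. */
        for_each_online_cpu(cpu) {
                fw = &per_cpu(pcpu_flush, cpu);
                if (!cpu_needs_flush(cpu)) {
                        fw->skip = true;
                        continue;
                }
                INIT_WORK(&fw->work, pcpu_flush_fn);
                fw->skip = false;
                schedule_work_on(cpu, &fw->work);
        }

        /* Pass 2: wait for every queued item to finish. */
        for_each_online_cpu(cpu) {
                fw = &per_cpu(pcpu_flush, cpu);
                if (!fw->skip)
                        flush_work(&fw->work);
        }

        cpus_read_unlock();
}

The two passes matter: queuing everything first lets the flushes on different CPUs proceed concurrently, so the caller blocks for roughly one flush rather than the sum of all of them.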
1 parent 08beb54 commit 5a836bf

File tree

2 files changed (+80, -16 lines)

mm/slab_common.c

2 additions, 0 deletions

@@ -502,6 +502,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (unlikely(!s))
 		return;
 
+	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
 	s->refcount--;
@@ -516,6 +517,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	}
 out_unlock:
 	mutex_unlock(&slab_mutex);
+	cpus_read_unlock();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
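The cpus_read_lock() here pairs with the lockdep_assert_cpus_held() in the new flush_all_cpus_locked() (see the mm/slub.c diff below): kmem_cache_destroy() pins CPU hotplug first, then takes slab_mutex, so __kmem_cache_shutdown() can flush without acquiring the hotplug lock again. A minimal sketch of that ordering, with slab_mutex_example as a hypothetical stand-in for slab_mutex:

#include <linux/cpu.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(slab_mutex_example);        /* stand-in for slab_mutex */

static void destroy_path_sketch(void)
{
        cpus_read_lock();                       /* 1. pin the set of online CPUs */
        mutex_lock(&slab_mutex_example);        /* 2. then the global slab mutex */

        /*
         * Work done here (e.g. __kmem_cache_shutdown() calling
         * flush_all_cpus_locked()) may assert the hotplug lock with
         * lockdep_assert_cpus_held() instead of acquiring it.
         */

        mutex_unlock(&slab_mutex_example);      /* 3. release in reverse order */
        cpus_read_unlock();
}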
mm/slub.c

78 additions, 16 deletions
@@ -2496,16 +2496,25 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	void *freelist = c->freelist;
-	struct page *page = c->page;
+	unsigned long flags;
+	struct page *page;
+	void *freelist;
+
+	local_irq_save(flags);
+
+	page = c->page;
+	freelist = c->freelist;
 
 	c->page = NULL;
 	c->freelist = NULL;
 	c->tid = next_tid(c->tid);
 
-	deactivate_slab(s, page, freelist);
+	local_irq_restore(flags);
 
-	stat(s, CPUSLAB_FLUSH);
+	if (page) {
+		deactivate_slab(s, page, freelist);
+		stat(s, CPUSLAB_FLUSH);
+	}
 }
 
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
@@ -2526,33 +2535,79 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 	unfreeze_partials_cpu(s, c);
 }
 
+struct slub_flush_work {
+	struct work_struct work;
+	struct kmem_cache *s;
+	bool skip;
+};
+
 /*
  * Flush cpu slab.
  *
- * Called from IPI handler with interrupts disabled.
+ * Called from CPU work handler with migration disabled.
  */
-static void flush_cpu_slab(void *d)
+static void flush_cpu_slab(struct work_struct *w)
 {
-	struct kmem_cache *s = d;
-	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+	struct kmem_cache *s;
+	struct kmem_cache_cpu *c;
+	struct slub_flush_work *sfw;
+
+	sfw = container_of(w, struct slub_flush_work, work);
+
+	s = sfw->s;
+	c = this_cpu_ptr(s->cpu_slab);
 
 	if (c->page)
 		flush_slab(s, c);
 
 	unfreeze_partials(s);
 }
 
-static bool has_cpu_slab(int cpu, void *info)
+static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 {
-	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 	return c->page || slub_percpu_partial(c);
 }
 
+static DEFINE_MUTEX(flush_lock);
+static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
+
+static void flush_all_cpus_locked(struct kmem_cache *s)
+{
+	struct slub_flush_work *sfw;
+	unsigned int cpu;
+
+	lockdep_assert_cpus_held();
+	mutex_lock(&flush_lock);
+
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		if (!has_cpu_slab(cpu, s)) {
+			sfw->skip = true;
+			continue;
+		}
+		INIT_WORK(&sfw->work, flush_cpu_slab);
+		sfw->skip = false;
+		sfw->s = s;
+		schedule_work_on(cpu, &sfw->work);
+	}
+
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		if (sfw->skip)
+			continue;
+		flush_work(&sfw->work);
+	}
+
+	mutex_unlock(&flush_lock);
+}
+
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+	cpus_read_lock();
+	flush_all_cpus_locked(s);
+	cpus_read_unlock();
 }
 
 /*
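Note the signature change above: a work handler receives only a struct work_struct *, so the cache pointer can no longer arrive as a void * argument the way on_each_cpu_cond() passed it; it travels inside the work item and is recovered with container_of(). A minimal illustration of that idiom (tagged_work and tagged_handler are hypothetical names):

#include <linux/workqueue.h>

struct tagged_work {
        struct work_struct work;        /* must be embedded by value */
        void *payload;                  /* extra data the handler needs */
};

static void tagged_handler(struct work_struct *w)
{
        /* Recover the enclosing struct from the embedded member. */
        struct tagged_work *tw = container_of(w, struct tagged_work, work);

        (void)tw->payload;              /* use the payload in process context */
}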
@@ -4097,7 +4152,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 	int node;
 	struct kmem_cache_node *n;
 
-	flush_all(s);
+	flush_all_cpus_locked(s);
 	/* Attempt to free all objects */
 	for_each_kmem_cache_node(s, node, n) {
 		free_partial(s, n);
@@ -4373,7 +4428,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s)
+static int __kmem_cache_do_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -4385,7 +4440,6 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	unsigned long flags;
 	int ret = 0;
 
-	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
@@ -4435,13 +4489,21 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	return ret;
 }
 
+int __kmem_cache_shrink(struct kmem_cache *s)
+{
+	flush_all(s);
+	return __kmem_cache_do_shrink(s);
+}
+
 static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
 	mutex_lock(&slab_mutex);
-	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s);
+	list_for_each_entry(s, &slab_caches, list) {
+		flush_all_cpus_locked(s);
+		__kmem_cache_do_shrink(s);
+	}
 	mutex_unlock(&slab_mutex);
 
 	return 0;
