
Commit fc1455f

mm, slub: separate detaching of partial list in unfreeze_partials() from unfreezing
Unfreezing the partial list can be split into two phases: detaching the list from struct kmem_cache_cpu, and processing the list. The whole operation does not need to be protected by disabled irqs. Restructure the code to separate the detaching (with disabled irqs) from the unfreezing (with irq disabling to be reduced in the next patch).

Also, unfreeze_partials() can be called from another cpu on behalf of a cpu that is being offlined, where disabling irqs on the local cpu makes no sense, so restructure the code as follows:

- __unfreeze_partials() is the bulk of unfreeze_partials() that processes the detached percpu partial list
- unfreeze_partials() detaches the list from the current cpu with irqs disabled and calls __unfreeze_partials()
- unfreeze_partials_cpu() is to be called for the offlined cpu, so it needs no irq disabling, and is called from __flush_cpu_slab()
- flush_cpu_slab() is for the local cpu, so it needs to call unfreeze_partials(). It therefore can't simply call __flush_cpu_slab(smp_processor_id()) anymore and has to open-code the proper calls.

Signed-off-by: Vlastimil Babka <[email protected]>
1 parent c2f973b commit fc1455f
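The core move in this patch is a detach-then-process split: take the percpu partial list pointer while irqs are disabled, publish NULL in its place, and only then walk the now-private list. Below is a minimal userspace C sketch of the same pattern, with a pthread mutex standing in for the irqs-disabled section; the list type and function names are illustrative, not from the patch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Shared list, protected by a lock (the analogue of the
 * irqs-disabled section in the kernel code). */
static struct node *partial_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of __unfreeze_partials(): walk a detached, now-private
 * list; no lock is needed because nobody else can see it. */
static void process_detached(struct node *head)
{
	while (head) {
		struct node *next = head->next;

		printf("processing %d\n", head->val);
		free(head);
		head = next;
	}
}

/* Analogue of unfreeze_partials(): only the two pointer operations
 * that detach the list run under the lock. */
static void detach_and_process(void)
{
	struct node *head;

	pthread_mutex_lock(&list_lock);
	head = partial_list;
	partial_list = NULL;
	pthread_mutex_unlock(&list_lock);

	if (head)
		process_detached(head);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->val = i;
		pthread_mutex_lock(&list_lock);
		n->next = partial_list;
		partial_list = n;
		pthread_mutex_unlock(&list_lock);
	}
	detach_and_process();
	return 0;
}

Only the detach step needs mutual exclusion; the potentially long list walk runs with the lock dropped, which is exactly what lets the next patch in the series shrink the irq-disabled region.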


mm/slub.c

Lines changed: 51 additions & 22 deletions
@@ -2347,25 +2347,15 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 	}
 }
 
-/*
- * Unfreeze all the cpu partial slabs.
- *
- * This function must be called with preemption or migration
- * disabled with c local to the cpu.
- */
-static void unfreeze_partials(struct kmem_cache *s,
-		struct kmem_cache_cpu *c)
-{
 #ifdef CONFIG_SLUB_CPU_PARTIAL
+static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
+{
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
-	struct page *page, *partial_page, *discard_page = NULL;
+	struct page *page, *discard_page = NULL;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
-	partial_page = slub_percpu_partial(c);
-	c->partial = NULL;
-
 	while (partial_page) {
 		struct page new;
 		struct page old;
@@ -2420,10 +2410,45 @@ static void unfreeze_partials(struct kmem_cache *s,
 		discard_slab(s, page);
 		stat(s, FREE_SLAB);
 	}
+}
 
-#endif	/* CONFIG_SLUB_CPU_PARTIAL */
+/*
+ * Unfreeze all the cpu partial slabs.
+ */
+static void unfreeze_partials(struct kmem_cache *s)
+{
+	struct page *partial_page;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	partial_page = this_cpu_read(s->cpu_slab->partial);
+	this_cpu_write(s->cpu_slab->partial, NULL);
+	local_irq_restore(flags);
+
+	if (partial_page)
+		__unfreeze_partials(s, partial_page);
 }
 
+static void unfreeze_partials_cpu(struct kmem_cache *s,
+				  struct kmem_cache_cpu *c)
+{
+	struct page *partial_page;
+
+	partial_page = slub_percpu_partial(c);
+	c->partial = NULL;
+
+	if (partial_page)
+		__unfreeze_partials(s, partial_page);
+}
+
+#else	/* CONFIG_SLUB_CPU_PARTIAL */
+
+static inline void unfreeze_partials(struct kmem_cache *s) { }
+static inline void unfreeze_partials_cpu(struct kmem_cache *s,
+				  struct kmem_cache_cpu *c) { }
+
+#endif	/* CONFIG_SLUB_CPU_PARTIAL */
+
 /*
  * Put a page that was just frozen (in __slab_free|get_partial_node) into a
  * partial page slot if available.
@@ -2452,7 +2477,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 			 * partial array is full. Move the existing
 			 * set to the per node partial list.
 			 */
-			unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+			unfreeze_partials(s);
 			oldpage = NULL;
 			pobjects = 0;
 			pages = 0;
@@ -2487,26 +2512,30 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	stat(s, CPUSLAB_FLUSH);
 }
 
-/*
- * Flush cpu slab.
- *
- * Called from IPI handler with interrupts disabled.
- */
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 	if (c->page)
 		flush_slab(s, c);
 
-	unfreeze_partials(s, c);
+	unfreeze_partials_cpu(s, c);
 }
 
+/*
+ * Flush cpu slab.
+ *
+ * Called from IPI handler with interrupts disabled.
+ */
 static void flush_cpu_slab(void *d)
 {
 	struct kmem_cache *s = d;
+	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
 
-	__flush_cpu_slab(s, smp_processor_id());
+	if (c->page)
+		flush_slab(s, c);
+
+	unfreeze_partials(s);
 }
 
 static bool has_cpu_slab(int cpu, void *info)
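Note the asymmetry the commit message describes: the new unfreeze_partials() accesses the partial list via this_cpu_read()/this_cpu_write(), so it is only valid on the local cpu with irqs disabled, whereas unfreeze_partials_cpu() is handed the kmem_cache_cpu of the offlined cpu directly and needs no irq disabling, since that cpu is no longer running and cannot race with the caller. This is why flush_cpu_slab() can no longer simply call __flush_cpu_slab(smp_processor_id()) and instead open-codes the flush_slab() + unfreeze_partials() pair.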
