@@ -2496,16 +2496,25 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	void *freelist = c->freelist;
-	struct page *page = c->page;
+	unsigned long flags;
+	struct page *page;
+	void *freelist;
+
+	local_irq_save(flags);
+
+	page = c->page;
+	freelist = c->freelist;
 
 	c->page = NULL;
 	c->freelist = NULL;
 	c->tid = next_tid(c->tid);
 
-	deactivate_slab(s, page, freelist);
+	local_irq_restore(flags);
 
-	stat(s, CPUSLAB_FLUSH);
+	if (page) {
+		deactivate_slab(s, page, freelist);
+		stat(s, CPUSLAB_FLUSH);
+	}
 }
 
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
@@ -2526,33 +2535,79 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 	unfreeze_partials_cpu(s, c);
 }
 
+struct slub_flush_work {
+	struct work_struct work;
+	struct kmem_cache *s;
+	bool skip;
+};
+
 /*
  * Flush cpu slab.
  *
- * Called from IPI handler with interrupts disabled.
+ * Called from CPU work handler with migration disabled.
  */
-static void flush_cpu_slab(void *d)
+static void flush_cpu_slab(struct work_struct *w)
 {
-	struct kmem_cache *s = d;
-	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+	struct kmem_cache *s;
+	struct kmem_cache_cpu *c;
+	struct slub_flush_work *sfw;
+
+	sfw = container_of(w, struct slub_flush_work, work);
+
+	s = sfw->s;
+	c = this_cpu_ptr(s->cpu_slab);
 
 	if (c->page)
 		flush_slab(s, c);
 
 	unfreeze_partials(s);
 }
 
-static bool has_cpu_slab(int cpu, void *info)
+static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 {
-	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 	return c->page || slub_percpu_partial(c);
 }
 
+static DEFINE_MUTEX(flush_lock);
+static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
+
+static void flush_all_cpus_locked(struct kmem_cache *s)
+{
+	struct slub_flush_work *sfw;
+	unsigned int cpu;
+
+	lockdep_assert_cpus_held();
+	mutex_lock(&flush_lock);
+
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		if (!has_cpu_slab(cpu, s)) {
+			sfw->skip = true;
+			continue;
+		}
+		INIT_WORK(&sfw->work, flush_cpu_slab);
+		sfw->skip = false;
+		sfw->s = s;
+		schedule_work_on(cpu, &sfw->work);
+	}
+
+	for_each_online_cpu(cpu) {
+		sfw = &per_cpu(slub_flush, cpu);
+		if (sfw->skip)
+			continue;
+		flush_work(&sfw->work);
+	}
+
+	mutex_unlock(&flush_lock);
+}
+
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+	cpus_read_lock();
+	flush_all_cpus_locked(s);
+	cpus_read_unlock();
 }
 
 /*
@@ -4097,7 +4152,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 	int node;
 	struct kmem_cache_node *n;
 
-	flush_all(s);
+	flush_all_cpus_locked(s);
 	/* Attempt to free all objects */
 	for_each_kmem_cache_node(s, node, n) {
 		free_partial(s, n);
@@ -4373,7 +4428,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s)
+static int __kmem_cache_do_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -4385,7 +4440,6 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	unsigned long flags;
 	int ret = 0;
 
-	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
@@ -4435,13 +4489,21 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	return ret;
 }
 
+int __kmem_cache_shrink(struct kmem_cache *s)
+{
+	flush_all(s);
+	return __kmem_cache_do_shrink(s);
+}
+
 static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
 	mutex_lock(&slab_mutex);
-	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s);
+	list_for_each_entry(s, &slab_caches, list) {
+		flush_all_cpus_locked(s);
+		__kmem_cache_do_shrink(s);
+	}
 	mutex_unlock(&slab_mutex);
 
 	return 0;
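
Note on the pattern above: the IPI-based on_each_cpu_cond() flush is replaced by statically allocated per-CPU work items that are queued with schedule_work_on() and then waited for with flush_work(), so the per-CPU flush now runs in process context and only flush_slab() itself briefly disables interrupts around the c->page/c->freelist handover. The sketch below restates that queue-then-wait scheme in a self-contained form; all demo_* identifiers and the placeholder predicate/handler bodies are invented for this illustration, not code from mm/slub.c.

/*
 * Standalone sketch of the queue-then-wait flush pattern above.
 * The demo_* names are illustrative only; the kernel primitives
 * (workqueue, per-CPU data, cpu hotplug lock) are real.
 */
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_flush_work {
	struct work_struct work;	/* one work item per CPU */
	void *arg;			/* what to flush (a kmem_cache in SLUB) */
	bool skip;			/* this CPU had nothing cached */
};

/* The mutex serializes users of the single static set of work items. */
static DEFINE_MUTEX(demo_flush_lock);
static DEFINE_PER_CPU(struct demo_flush_work, demo_flush);

/* Placeholder for has_cpu_slab(): does this CPU cache anything for @arg? */
static bool demo_cpu_has_work(int cpu, void *arg)
{
	return true;
}

/* Work handler: runs in process context on the CPU it was queued on. */
static void demo_flush_fn(struct work_struct *w)
{
	struct demo_flush_work *dfw = container_of(w, struct demo_flush_work, work);

	/* Stand-in for flush_slab() + unfreeze_partials(). */
	pr_info("flushing %p\n", dfw->arg);
}

/* Caller must hold cpus_read_lock() so the online cpu mask stays stable. */
static void demo_flush_all_cpus_locked(void *arg)
{
	struct demo_flush_work *dfw;
	unsigned int cpu;

	lockdep_assert_cpus_held();
	mutex_lock(&demo_flush_lock);

	/* Pass 1: queue work only where something needs flushing. */
	for_each_online_cpu(cpu) {
		dfw = &per_cpu(demo_flush, cpu);
		if (!demo_cpu_has_work(cpu, arg)) {
			dfw->skip = true;
			continue;
		}
		INIT_WORK(&dfw->work, demo_flush_fn);
		dfw->skip = false;
		dfw->arg = arg;
		schedule_work_on(cpu, &dfw->work);
	}

	/* Pass 2: wait, so the flush is synchronous for the caller. */
	for_each_online_cpu(cpu) {
		dfw = &per_cpu(demo_flush, cpu);
		if (!dfw->skip)
			flush_work(&dfw->work);
	}

	mutex_unlock(&demo_flush_lock);
}

/* Wrapper for callers that do not already hold the cpus read lock. */
static void demo_flush_all(void *arg)
{
	cpus_read_lock();
	demo_flush_all_cpus_locked(arg);
	cpus_read_unlock();
}

Keeping the work items static and guarding them with a mutex, rather than allocating them per call, presumably lets the flush be issued from reclaim/shutdown/hotplug paths without allocating memory; that reading is an inference from the diff, which only shows the flush_lock mutex and the lockdep_assert_cpus_held() requirement for the *_cpus_locked() variant.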