@@ -138,9 +138,6 @@ static struct khugepaged_scan khugepaged_scan = {
 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
-static DEFINE_SPINLOCK(split_queue_lock);
-static LIST_HEAD(split_queue);
-static unsigned long split_queue_len;
 static struct shrinker deferred_split_shrinker;
 
 static void set_recommended_min_free_kbytes(void)
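Note: the hunk above drops the global split queue, and the rest of the patch dereferences pgdata->split_queue_lock, pgdata->split_queue and pgdata->split_queue_len. The companion changes that add those fields to struct pglist_data and initialize them per node are not part of this excerpt; a minimal sketch of what this diff assumes (field names taken from the code below; the CONFIG_TRANSPARENT_HUGEPAGE guard and the init site are assumptions) could look like:

	/* new per-node state inside struct pglist_data (sketch only) */
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		spinlock_t split_queue_lock;	/* protects split_queue and split_queue_len */
		struct list_head split_queue;	/* THPs queued for deferred splitting on this node */
		unsigned long split_queue_len;	/* length of split_queue */
	#endif

	/* one-time initialization of each node's pgdat, e.g. on the pgdat setup path (sketch only) */
	spin_lock_init(&pgdat->split_queue_lock);
	INIT_LIST_HEAD(&pgdat->split_queue);
	pgdat->split_queue_len = 0;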
@@ -3358,6 +3355,7 @@ int total_mapcount(struct page *page)
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
 	struct page *head = compound_head(page);
+	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
 	struct anon_vma *anon_vma;
 	int count, mapcount, ret;
 	bool mlocked;
@@ -3401,27 +3399,27 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	lru_add_drain();
 
 	/* Prevent deferred_split_scan() touching ->_count */
-	spin_lock_irqsave(&split_queue_lock, flags);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
 	count = page_count(head);
 	mapcount = total_mapcount(head);
 	if (!mapcount && count == 1) {
 		if (!list_empty(page_deferred_list(head))) {
-			split_queue_len--;
+			pgdata->split_queue_len--;
 			list_del(page_deferred_list(head));
 		}
-		spin_unlock_irqrestore(&split_queue_lock, flags);
+		spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 		__split_huge_page(page, list);
 		ret = 0;
 	} else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-		spin_unlock_irqrestore(&split_queue_lock, flags);
+		spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 		pr_alert("total_mapcount: %u, page_count(): %u\n",
 				mapcount, count);
 		if (PageTail(page))
 			dump_page(head, NULL);
 		dump_page(page, "total_mapcount(head) > 0");
 		BUG();
 	} else {
-		spin_unlock_irqrestore(&split_queue_lock, flags);
+		spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 		unfreeze_page(anon_vma, head);
 		ret = -EBUSY;
 	}
@@ -3436,52 +3434,56 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
 void free_transhuge_page(struct page *page)
 {
+	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
 	unsigned long flags;
 
-	spin_lock_irqsave(&split_queue_lock, flags);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
 	if (!list_empty(page_deferred_list(page))) {
-		split_queue_len--;
+		pgdata->split_queue_len--;
 		list_del(page_deferred_list(page));
 	}
-	spin_unlock_irqrestore(&split_queue_lock, flags);
+	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 	free_compound_page(page);
 }
 
 void deferred_split_huge_page(struct page *page)
 {
+	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
 	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 
-	spin_lock_irqsave(&split_queue_lock, flags);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
 	if (list_empty(page_deferred_list(page))) {
-		list_add_tail(page_deferred_list(page), &split_queue);
-		split_queue_len++;
+		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
+		pgdata->split_queue_len++;
 	}
-	spin_unlock_irqrestore(&split_queue_lock, flags);
+	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
 		struct shrink_control *sc)
 {
+	struct pglist_data *pgdata = NODE_DATA(sc->nid);
 	/*
 	 * Split a page from split_queue will free up at least one page,
 	 * at most HPAGE_PMD_NR - 1. We don't track exact number.
 	 * Let's use HPAGE_PMD_NR / 2 as ballpark.
 	 */
-	return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
+	return ACCESS_ONCE(pgdata->split_queue_len) * HPAGE_PMD_NR / 2;
 }
 
 static unsigned long deferred_split_scan(struct shrinker *shrink,
 		struct shrink_control *sc)
 {
+	struct pglist_data *pgdata = NODE_DATA(sc->nid);
 	unsigned long flags;
 	LIST_HEAD(list), *pos, *next;
 	struct page *page;
 	int split = 0;
 
-	spin_lock_irqsave(&split_queue_lock, flags);
-	list_splice_init(&split_queue, &list);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+	list_splice_init(&pgdata->split_queue, &list);
 
 	/* Take pin on all head pages to avoid freeing them under us */
 	list_for_each_safe(pos, next, &list) {
@@ -3490,10 +3492,10 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 		/* race with put_compound_page() */
 		if (!get_page_unless_zero(page)) {
 			list_del_init(page_deferred_list(page));
-			split_queue_len--;
+			pgdata->split_queue_len--;
 		}
 	}
-	spin_unlock_irqrestore(&split_queue_lock, flags);
+	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
 	list_for_each_safe(pos, next, &list) {
 		page = list_entry((void *)pos, struct page, mapping);
@@ -3505,9 +3507,9 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 		put_page(page);
 	}
 
-	spin_lock_irqsave(&split_queue_lock, flags);
-	list_splice_tail(&list, &split_queue);
-	spin_unlock_irqrestore(&split_queue_lock, flags);
+	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+	list_splice_tail(&list, &pgdata->split_queue);
+	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
 	return split * HPAGE_PMD_NR / 2;
 }
@@ -3516,6 +3518,7 @@ static struct shrinker deferred_split_shrinker = {
 	.count_objects = deferred_split_count,
 	.scan_objects = deferred_split_scan,
 	.seeks = DEFAULT_SEEKS,
+	.flags = SHRINKER_NUMA_AWARE,
 };
 
 #ifdef CONFIG_DEBUG_FS
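Setting .flags = SHRINKER_NUMA_AWARE is what completes the per-node conversion: with the flag set, the shrinker core invokes count_objects/scan_objects once per NUMA node with sc->nid filled in, which is why deferred_split_count() and deferred_split_scan() above resolve their queue via NODE_DATA(sc->nid). Without the flag, the callbacks are only asked about node 0. The caller-side effect is roughly the following (a sketch of the semantics, not a verbatim copy of mm/vmscan.c; gfp_mask, nid and freeable stand in for the caller's locals):

	struct shrink_control sc = {
		.gfp_mask = gfp_mask,
		/* non-NUMA-aware shrinkers are only consulted for node 0 */
		.nid = (shrinker->flags & SHRINKER_NUMA_AWARE) ? nid : 0,
	};
	freeable = shrinker->count_objects(shrinker, &sc);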