@@ -1376,7 +1376,7 @@ static void remove_hugetlb_page(struct hstate *h, struct page *page,
 		h->nr_huge_pages_node[nid]--;
 }
 
-static void update_and_free_page(struct hstate *h, struct page *page)
+static void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
 	struct page *subpage = page;
@@ -1399,12 +1399,79 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 	}
 }
 
+/*
+ * Because update_and_free_page() can be called under any context, we cannot
+ * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
+ * actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
+ * the vmemmap pages.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to be
+ * freed and frees them one-by-one. As the page->mapping pointer is going
+ * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
+ * structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+	struct llist_node *node;
+
+	node = llist_del_all(&hpage_freelist);
+
+	while (node) {
+		struct page *page;
+		struct hstate *h;
+
+		page = container_of((struct address_space **)node,
+				     struct page, mapping);
+		node = node->next;
+		page->mapping = NULL;
+		/*
+		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
+		 * is going to trigger because a previous call to
+		 * remove_hugetlb_page() will set_compound_page_dtor(page,
+		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
+		 */
+		h = size_to_hstate(page_size(page));
+
+		__update_and_free_page(h, page);
+
+		cond_resched();
+	}
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+static inline void flush_free_hpage_work(struct hstate *h)
+{
+	if (free_vmemmap_pages_per_hpage(h))
+		flush_work(&free_hpage_work);
+}
+
+static void update_and_free_page(struct hstate *h, struct page *page,
+				 bool atomic)
+{
+	if (!free_vmemmap_pages_per_hpage(h) || !atomic) {
+		__update_and_free_page(h, page);
+		return;
+	}
+
+	/*
+	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
+	 *
+	 * Only call schedule_work() if hpage_freelist was previously
+	 * empty. Otherwise, schedule_work() has already been called but the
+	 * workfn hasn't retrieved the list yet.
+	 */
+	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
+		schedule_work(&free_hpage_work);
+}
+
 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
 {
 	struct page *page, *t_page;
 
 	list_for_each_entry_safe(page, t_page, list, lru) {
-		update_and_free_page(h, page);
+		update_and_free_page(h, page, false);
 		cond_resched();
 	}
 }
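
The core of the change is above: a lockless llist feeds a workqueue, so freeing can be requested from atomic context while the expensive vmemmap reallocation still runs where GFP_KERNEL is allowed. Below is a minimal, self-contained sketch of that pattern. struct deferred_item, defer_free() and the other names are hypothetical stand-ins (the patch itself has no separate struct; it reuses page->mapping as the llist_node), while the llist and workqueue calls are the real kernel APIs.

	#include <linux/llist.h>
	#include <linux/workqueue.h>
	#include <linux/sched.h>
	#include <linux/slab.h>

	struct deferred_item {
		struct llist_node node;	/* hugetlb reuses page->mapping for this */
		/* ... payload ... */
	};

	static LLIST_HEAD(deferred_list);

	static void deferred_workfn(struct work_struct *work)
	{
		/* atomically detach the whole batch, then walk it lock-free */
		struct llist_node *node = llist_del_all(&deferred_list);

		while (node) {
			struct deferred_item *item =
				llist_entry(node, struct deferred_item, node);

			node = node->next;	/* load next before freeing item */
			kfree(item);		/* process context: sleeping is fine here */
			cond_resched();
		}
	}
	static DECLARE_WORK(deferred_work, deferred_workfn);

	/* callable from any context, including atomic */
	static void defer_free(struct deferred_item *item)
	{
		/*
		 * llist_add() returns true only when the list was empty, so
		 * schedule_work() runs at most once per batch; later producers
		 * just chain onto a list the worker has not yet retrieved.
		 */
		if (llist_add(&item->node, &deferred_list))
			schedule_work(&deferred_work);
	}
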
@@ -1471,12 +1538,12 @@ void free_huge_page(struct page *page)
 	if (HPageTemporary(page)) {
 		remove_hugetlb_page(h, page, false);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
-		update_and_free_page(h, page);
+		update_and_free_page(h, page, true);
 	} else if (h->surplus_huge_pages_node[nid]) {
 		/* remove the page from active list */
 		remove_hugetlb_page(h, page, true);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
-		update_and_free_page(h, page);
+		update_and_free_page(h, page, true);
 	} else {
 		arch_clear_hugepage_flags(page);
 		enqueue_huge_page(h, page);
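
A note on the new atomic argument at the call sites: free_huge_page() can be reached from put_page() in atomic context (which is why it takes hugetlb_lock with spin_lock_irqsave() here), so both call sites in this hunk pass true and take the deferred path whenever vmemmap pages would have to be reallocated. The remaining callers in the hunks below run in process context and pass false to free synchronously.
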
@@ -1795,7 +1862,7 @@ int dissolve_free_huge_page(struct page *page)
 		remove_hugetlb_page(h, head, false);
 		h->max_huge_pages--;
 		spin_unlock_irq(&hugetlb_lock);
-		update_and_free_page(h, head);
+		update_and_free_page(h, head, false);
 		return 0;
 	}
 out:
@@ -2411,14 +2478,14 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		 * Pages have been replaced, we can safely free the old one.
 		 */
 		spin_unlock_irq(&hugetlb_lock);
-		update_and_free_page(h, old_page);
+		update_and_free_page(h, old_page, false);
 	}
 
 	return ret;
 
 free_new:
 	spin_unlock_irq(&hugetlb_lock);
-	update_and_free_page(h, new_page);
+	update_and_free_page(h, new_page, false);
 
 	return ret;
 }
@@ -2832,6 +2899,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	 * pages in hstate via the proc/sysfs interfaces.
 	 */
 	mutex_lock(&h->resize_lock);
+	flush_free_hpage_work(h);
 	spin_lock_irq(&hugetlb_lock);
 
 	/*
@@ -2941,6 +3009,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	/* free the pages after dropping lock */
 	spin_unlock_irq(&hugetlb_lock);
 	update_and_free_pages_bulk(h, &page_list);
+	flush_free_hpage_work(h);
 	spin_lock_irq(&hugetlb_lock);
 
 	while (count < persistent_huge_pages(h)) {
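
Finally, the two flush_free_hpage_work() calls bracket the resize: once flush_work() returns, every page queued on hpage_freelist before the flush has passed through free_hpage_workfn(), so set_max_huge_pages() does its accounting against a settled page count. The helper is a no-op for hstates that never defer (free_vmemmap_pages_per_hpage() == 0). In terms of the hypothetical sketch above:

	defer_free(item);		/* producer, possibly from atomic context */
	flush_work(&deferred_work);	/* sleeps until the workfn has drained the batch */
	/* every item queued before the flush has now been freed */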