
Commit b65d4ad

Muchun Song authored and torvalds committed
mm: hugetlb: defer freeing of HugeTLB pages
In a subsequent patch, we will allocate the vmemmap pages when freeing a HugeTLB page. But update_and_free_page() can be called from any context, so we cannot use GFP_KERNEL there to allocate the vmemmap pages. Instead, defer the actual freeing to a kworker so that we do not have to fall back on GFP_ATOMIC. __update_and_free_page() is where the call to allocate the vmemmap pages will be inserted.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Muchun Song <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Reviewed-by: Oscar Salvador <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Balbir Singh <[email protected]>
Cc: Barry Song <[email protected]>
Cc: Bodeddula Balasubramaniam <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Chen Huang <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: HORIGUCHI NAOYA <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Joao Martins <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mina Almasry <[email protected]>
Cc: Oliver Neukum <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Pawan Gupta <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Randy Dunlap <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Xiongchun Duan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent f41f2ed commit b65d4ad

3 files changed: 93 additions & 19 deletions
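The commit message above describes the core pattern: a producer that may run in atomic context pushes pages onto a lockless list and kicks a workqueue, and the worker does the real freeing later in process context, where GFP_KERNEL is allowed. Before the diff, here is a minimal, self-contained sketch of that pattern; the demo_* names are invented for illustration and are not part of this commit.

#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/sched.h>

struct demo_obj {
	struct llist_node node;
	/* payload ... */
};

static LLIST_HEAD(demo_freelist);

static void demo_free_workfn(struct work_struct *work)
{
	/* Atomically detach the whole batch; no lock needed. */
	struct llist_node *node = llist_del_all(&demo_freelist);

	while (node) {
		struct demo_obj *obj = llist_entry(node, struct demo_obj, node);

		node = node->next;
		/* Process context: GFP_KERNEL allocations would be fine here. */
		kfree(obj);
		cond_resched();
	}
}
static DECLARE_WORK(demo_free_work, demo_free_workfn);

/* May be called from any context, including atomic. */
static void demo_free_deferred(struct demo_obj *obj)
{
	/*
	 * llist_add() returns true only when the list was previously
	 * empty, so the work is scheduled exactly once per batch.
	 */
	if (llist_add(&obj->node, &demo_freelist))
		schedule_work(&demo_free_work);
}

The patch uses the same schedule-once logic; the only extra wrinkle is that struct page has no spare llist_node, which is why page->mapping is pressed into service (see the note after the mm/hugetlb.c diff).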

mm/hugetlb.c

Lines changed: 76 additions & 7 deletions

@@ -1376,7 +1376,7 @@ static void remove_hugetlb_page(struct hstate *h, struct page *page,
 	h->nr_huge_pages_node[nid]--;
 }
 
-static void update_and_free_page(struct hstate *h, struct page *page)
+static void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
 	struct page *subpage = page;
@@ -1399,12 +1399,79 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 	}
 }
 
+/*
+ * As update_and_free_page() can be called under any context, so we cannot
+ * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
+ * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
+ * the vmemmap pages.
+ *
+ * free_hpage_workfn() locklessly retrieves the linked list of pages to be
+ * freed and frees them one-by-one. As the page->mapping pointer is going
+ * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
+ * structure of a lockless linked list of huge pages to be freed.
+ */
+static LLIST_HEAD(hpage_freelist);
+
+static void free_hpage_workfn(struct work_struct *work)
+{
+	struct llist_node *node;
+
+	node = llist_del_all(&hpage_freelist);
+
+	while (node) {
+		struct page *page;
+		struct hstate *h;
+
+		page = container_of((struct address_space **)node,
+				     struct page, mapping);
+		node = node->next;
+		page->mapping = NULL;
+		/*
+		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
+		 * is going to trigger because a previous call to
+		 * remove_hugetlb_page() will set_compound_page_dtor(page,
+		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
+		 */
+		h = size_to_hstate(page_size(page));
+
+		__update_and_free_page(h, page);
+
+		cond_resched();
+	}
+}
+static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
+
+static inline void flush_free_hpage_work(struct hstate *h)
+{
+	if (free_vmemmap_pages_per_hpage(h))
+		flush_work(&free_hpage_work);
+}
+
+static void update_and_free_page(struct hstate *h, struct page *page,
+				 bool atomic)
+{
+	if (!free_vmemmap_pages_per_hpage(h) || !atomic) {
+		__update_and_free_page(h, page);
+		return;
+	}
+
+	/*
+	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
+	 *
+	 * Only call schedule_work() if hpage_freelist is previously
+	 * empty. Otherwise, schedule_work() had been called but the workfn
+	 * hasn't retrieved the list yet.
+	 */
+	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
+		schedule_work(&free_hpage_work);
+}
+
 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
 {
 	struct page *page, *t_page;
 
 	list_for_each_entry_safe(page, t_page, list, lru) {
-		update_and_free_page(h, page);
+		update_and_free_page(h, page, false);
 		cond_resched();
 	}
 }
@@ -1471,12 +1538,12 @@ void free_huge_page(struct page *page)
 	if (HPageTemporary(page)) {
 		remove_hugetlb_page(h, page, false);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
-		update_and_free_page(h, page);
+		update_and_free_page(h, page, true);
 	} else if (h->surplus_huge_pages_node[nid]) {
 		/* remove the page from active list */
 		remove_hugetlb_page(h, page, true);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
-		update_and_free_page(h, page);
+		update_and_free_page(h, page, true);
 	} else {
 		arch_clear_hugepage_flags(page);
 		enqueue_huge_page(h, page);
@@ -1795,7 +1862,7 @@ int dissolve_free_huge_page(struct page *page)
 		remove_hugetlb_page(h, head, false);
 		h->max_huge_pages--;
 		spin_unlock_irq(&hugetlb_lock);
-		update_and_free_page(h, head);
+		update_and_free_page(h, head, false);
 		return 0;
 	}
 out:
@@ -2411,14 +2478,14 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		 * Pages have been replaced, we can safely free the old one.
 		 */
 		spin_unlock_irq(&hugetlb_lock);
-		update_and_free_page(h, old_page);
+		update_and_free_page(h, old_page, false);
 	}
 
 	return ret;
 
 free_new:
 	spin_unlock_irq(&hugetlb_lock);
-	update_and_free_page(h, new_page);
+	update_and_free_page(h, new_page, false);
 
 	return ret;
 }
@@ -2832,6 +2899,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	 * pages in hstate via the proc/sysfs interfaces.
 	 */
 	mutex_lock(&h->resize_lock);
+	flush_free_hpage_work(h);
 	spin_lock_irq(&hugetlb_lock);
 
 	/*
@@ -2941,6 +3009,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	/* free the pages after dropping lock */
 	spin_unlock_irq(&hugetlb_lock);
 	update_and_free_pages_bulk(h, &page_list);
+	flush_free_hpage_work(h);
 	spin_lock_irq(&hugetlb_lock);
 
 	while (count < persistent_huge_pages(h)) {
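A subtle point in the hunks above: struct page has no room for an llist_node, so the patch overlays one on page->mapping, which free_hpage_workfn() is about to clear anyway, and recovers the page with container_of(). A hedged illustration of that overlay with a made-up struct (not from the commit):

#include <linux/llist.h>
#include <linux/kernel.h>

struct demo_page {
	void *mapping;	/* stand-in for page->mapping */
};

/*
 * Push side: reuse the dead pointer field as the list node. This works
 * because struct llist_node is a single pointer, with the same size
 * and alignment as the field it overlays.
 */
static void demo_push(struct demo_page *page, struct llist_head *list)
{
	llist_add((struct llist_node *)&page->mapping, list);
}

/*
 * Pop side: reverse the cast, going from the overlaid field back to
 * the enclosing object, as free_hpage_workfn() does with container_of().
 */
static struct demo_page *demo_node_to_page(struct llist_node *node)
{
	return container_of((void **)node, struct demo_page, mapping);
}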

mm/hugetlb_vmemmap.c

Lines changed: 0 additions & 12 deletions

@@ -180,18 +180,6 @@
 #define RESERVE_VMEMMAP_NR	2U
 #define RESERVE_VMEMMAP_SIZE	(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
-/*
- * How many vmemmap pages associated with a HugeTLB page that can be freed
- * to the buddy allocator.
- *
- * Todo: Returns zero for now, which means the feature is disabled. We will
- * enable it once all the infrastructure is there.
- */
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
-{
-	return 0;
-}
-
 static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
 {
 	return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
mm/hugetlb_vmemmap.h

Lines changed: 17 additions & 0 deletions

@@ -12,9 +12,26 @@
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
+
+/*
+ * How many vmemmap pages associated with a HugeTLB page that can be freed
+ * to the buddy allocator.
+ *
+ * Todo: Returns zero for now, which means the feature is disabled. We will
+ * enable it once all the infrastructure is there.
+ */
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+	return 0;
+}
 #else
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
+
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+	return 0;
+}
 #endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
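The header change above pairs with the new callers in mm/hugetlb.c: flush_free_hpage_work() and update_and_free_page() test free_vmemmap_pages_per_hpage(), so the helper must be visible there whether or not CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set. Because both stubs are static inlines returning a constant, the compiler can fold the guard and drop the deferral calls when the feature is off. A sketch of that idiom, with demo names and reflecting my reading of the intent rather than anything stated in the commit:

/* Hypothetical Kconfig-style stub pair, mirroring the header above. */
#ifdef CONFIG_DEMO_FEATURE
static inline unsigned int demo_pages_per_obj(void) { return 2U; }
#else
static inline unsigned int demo_pages_per_obj(void) { return 0; }
#endif

void demo_expensive_flush(void);	/* assumed to exist elsewhere */

static void demo_flush(void)
{
	/*
	 * When the feature is compiled out, demo_pages_per_obj() is a
	 * constant 0, the branch folds to if (0), and the call below
	 * is eliminated entirely.
	 */
	if (demo_pages_per_obj())
		demo_expensive_flush();
}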
