
Commit 5981611

Muchun Song authored and akpm00 committed
mm: hugetlb_vmemmap: cleanup hugetlb_vmemmap related functions
Patch series "cleanup hugetlb_vmemmap". The word of "free" is not expressive enough to express the feature of optimizing vmemmap pages associated with each HugeTLB, rename this keywork to "optimize" is more clear. In this series, cheanup related codes to make it more clear and expressive. This is suggested by David. This patch (of 3): The word of "free" is not expressive enough to express the feature of optimizing vmemmap pages associated with each HugeTLB, rename this keywork to "optimize". And some function names are prefixed with "huge_page" instead of "hugetlb", it is easily to be confused with THP. In this patch, cheanup related functions to make code more clear and expressive. Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Muchun Song <[email protected]> Cc: David Hildenbrand <[email protected]> Cc: Mike Kravetz <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent aa282a1 commit 5981611

4 files changed, 36 insertions(+), 38 deletions(-)

include/linux/hugetlb.h

Lines changed: 1 addition & 1 deletion
@@ -624,7 +624,7 @@ struct hstate {
 	unsigned int free_huge_pages_node[MAX_NUMNODES];
 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
-	unsigned int nr_free_vmemmap_pages;
+	unsigned int optimize_vmemmap_pages;
 #endif
 #ifdef CONFIG_CGROUP_HUGETLB
 	/* cgroup control files */

mm/hugetlb.c

Lines changed: 5 additions & 5 deletions
@@ -1540,7 +1540,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
-	if (alloc_huge_page_vmemmap(h, page)) {
+	if (hugetlb_vmemmap_alloc(h, page)) {
 		spin_lock_irq(&hugetlb_lock);
 		/*
 		 * If we cannot allocate vmemmap pages, just refuse to free the

@@ -1617,7 +1617,7 @@ static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
 
 static inline void flush_free_hpage_work(struct hstate *h)
 {
-	if (free_vmemmap_pages_per_hpage(h))
+	if (hugetlb_optimize_vmemmap_pages(h))
 		flush_work(&free_hpage_work);
 }
 

@@ -1737,7 +1737,7 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
 
 static void __prep_new_huge_page(struct hstate *h, struct page *page)
 {
-	free_huge_page_vmemmap(h, page);
+	hugetlb_vmemmap_free(h, page);
 	INIT_LIST_HEAD(&page->lru);
 	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
 	hugetlb_set_page_subpool(page, NULL);

@@ -2110,7 +2110,7 @@ int dissolve_free_huge_page(struct page *page)
 	 * Attempt to allocate vmemmmap here so that we can take
 	 * appropriate action on failure.
 	 */
-	rc = alloc_huge_page_vmemmap(h, head);
+	rc = hugetlb_vmemmap_alloc(h, head);
 	if (!rc) {
 		/*
 		 * Move PageHWPoison flag from head page to the raw

@@ -3425,7 +3425,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 	remove_hugetlb_page_for_demote(h, page, false);
 	spin_unlock_irq(&hugetlb_lock);
 
-	rc = alloc_huge_page_vmemmap(h, page);
+	rc = hugetlb_vmemmap_alloc(h, page);
 	if (rc) {
 		/* Allocation of vmemmmap failed, we can not demote page */
 		spin_lock_irq(&hugetlb_lock);
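
All three call sites above follow the same contract: hugetlb_vmemmap_alloc() must succeed (return 0) before a HugeTLB page may be freed, dissolved, or demoted, because the struct pages it restores are needed again once the page stops being a HugeTLB page. Below is a minimal, self-contained userspace model of that back-out pattern; the *_model names and the hard-coded -12 (-ENOMEM) value are illustrative stand-ins, not kernel code:

#include <stdio.h>

static int vmemmap_alloc_fails;	/* simulates -ENOMEM from the page allocator */

/* Stand-in for hugetlb_vmemmap_alloc(): 0 on success, negative on failure. */
static int hugetlb_vmemmap_alloc_model(void)
{
	return vmemmap_alloc_fails ? -12 /* -ENOMEM */ : 0;
}

/* Tear a page down only if its struct pages could be restored first. */
static int teardown_hugetlb_page_model(void)
{
	int rc = hugetlb_vmemmap_alloc_model();

	if (rc) {
		printf("vmemmap restore failed (%d): keep the HugeTLB page\n", rc);
		return rc;
	}
	printf("vmemmap restored: safe to free/dissolve/demote the page\n");
	return 0;
}

int main(void)
{
	teardown_hugetlb_page_model();		/* success path */
	vmemmap_alloc_fails = 1;
	teardown_hugetlb_page_model();		/* back-out path */
	return 0;
}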

mm/hugetlb_vmemmap.c

Lines changed: 20 additions & 22 deletions
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Free some vmemmap pages of HugeTLB
+ * Optimize vmemmap pages associated with HugeTLB
  *
  * Copyright (c) 2020, Bytedance. All rights reserved.
  *

@@ -192,7 +192,7 @@ DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
 			hugetlb_free_vmemmap_enabled_key);
 EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);
 
-static int __init early_hugetlb_free_vmemmap_param(char *buf)
+static int __init hugetlb_vmemmap_early_param(char *buf)
 {
 	/* We cannot optimize if a "struct page" crosses page boundaries. */
 	if (!is_power_of_2(sizeof(struct page))) {

@@ -212,29 +212,26 @@ static int __init early_hugetlb_free_vmemmap_param(char *buf)
 
 	return 0;
 }
-early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
-
-static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
-{
-	return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;
-}
+early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);
 
 /*
  * Previously discarded vmemmap pages will be allocated and remapping
  * after this function returns zero.
  */
-int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
 {
 	int ret;
 	unsigned long vmemmap_addr = (unsigned long)head;
-	unsigned long vmemmap_end, vmemmap_reuse;
+	unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
 
 	if (!HPageVmemmapOptimized(head))
 		return 0;
 
-	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
-	vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
-	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
+	vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
+	vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
+	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+
 	/*
 	 * The pages which the vmemmap virtual address range [@vmemmap_addr,
 	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and

@@ -250,17 +247,18 @@ int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 	return ret;
 }
 
-void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
 {
 	unsigned long vmemmap_addr = (unsigned long)head;
-	unsigned long vmemmap_end, vmemmap_reuse;
+	unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
 
-	if (!free_vmemmap_pages_per_hpage(h))
+	vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
+	if (!vmemmap_pages)
 		return;
 
-	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
-	vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);
-	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
+	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
+	vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
+	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
 
 	/*
 	 * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)

@@ -297,8 +295,8 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 	 * hugetlbpage.rst for more details.
 	 */
 	if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
-		h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
+		h->optimize_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
 
-	pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
-		h->name);
+	pr_info("can optimize %d vmemmap pages for %s\n",
+		h->optimize_vmemmap_pages, h->name);
 }
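
For intuition about the address arithmetic in hugetlb_vmemmap_alloc()/hugetlb_vmemmap_free(), here is a self-contained userspace sketch of the same computation. It assumes an x86-64-style configuration: 4 KiB base pages, 64-byte struct page, a 2 MiB HugeTLB page (512 struct pages, so 8 vmemmap pages, 7 of them optimizable), RESERVE_VMEMMAP_NR of 1, and a made-up vmemmap start address. All concrete numbers are assumptions for illustration, not taken from the diff:

#include <stdio.h>

#define PAGE_SHIFT		12UL	/* 4 KiB base pages (assumed) */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define RESERVE_VMEMMAP_NR	1UL	/* one vmemmap page kept per HugeTLB page */
#define RESERVE_VMEMMAP_SIZE	(RESERVE_VMEMMAP_NR << PAGE_SHIFT)

int main(void)
{
	/*
	 * A 2 MiB HugeTLB page has 512 struct pages; at 64 bytes each its
	 * vmemmap spans 512 * 64 / 4096 = 8 base pages, of which
	 * 8 - RESERVE_VMEMMAP_NR = 7 can be optimized away.
	 */
	unsigned long optimize_vmemmap_pages = 7;
	unsigned long vmemmap_addr = 0xffffea0000000000UL;	/* hypothetical */
	unsigned long vmemmap_end, vmemmap_reuse;

	/* Mirrors the computation in the two functions above. */
	vmemmap_addr += RESERVE_VMEMMAP_SIZE;
	vmemmap_end = vmemmap_addr + (optimize_vmemmap_pages << PAGE_SHIFT);
	vmemmap_reuse = vmemmap_addr - PAGE_SIZE;

	printf("reuse page:       %#lx\n", vmemmap_reuse);
	printf("optimized range: [%#lx, %#lx)\n", vmemmap_addr, vmemmap_end);
	printf("pages optimized:  %lu\n", optimize_vmemmap_pages);
	return 0;
}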

mm/hugetlb_vmemmap.h

Lines changed: 10 additions & 10 deletions
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Free some vmemmap pages of HugeTLB
+ * Optimize vmemmap pages associated with HugeTLB
  *
  * Copyright (c) 2020, Bytedance. All rights reserved.
  *

@@ -11,33 +11,33 @@
 #include <linux/hugetlb.h>
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
-int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
-void free_huge_page_vmemmap(struct hstate *h, struct page *head);
+int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head);
+void hugetlb_vmemmap_free(struct hstate *h, struct page *head);
 void hugetlb_vmemmap_init(struct hstate *h);
 
 /*
- * How many vmemmap pages associated with a HugeTLB page that can be freed
- * to the buddy allocator.
+ * How many vmemmap pages associated with a HugeTLB page that can be
+ * optimized and freed to the buddy allocator.
  */
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
 {
-	return h->nr_free_vmemmap_pages;
+	return h->optimize_vmemmap_pages;
 }
 #else
-static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+static inline int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
 {
 	return 0;
 }
 
-static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
+static inline void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
 {
 }
 
 static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
 }
 
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
 {
 	return 0;
 }
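
The #else branch above is the usual config-stub idiom: callers in mm/hugetlb.c can invoke these functions unconditionally, and when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is off the compiler folds the calls away (hugetlb_vmemmap_alloc() trivially succeeds, hugetlb_vmemmap_free() is a no-op, hugetlb_optimize_vmemmap_pages() returns 0). A self-contained sketch of the same idiom, with made-up CONFIG_FEATURE_ON and optimize_pages() names:

#include <stdio.h>

#define CONFIG_FEATURE_ON 1	/* models CONFIG_HUGETLB_PAGE_FREE_VMEMMAP=y */

#if CONFIG_FEATURE_ON
static unsigned int optimize_pages(void) { return 7; }	/* real accessor */
#else
static unsigned int optimize_pages(void) { return 0; }	/* stub: branch dies */
#endif

int main(void)
{
	/* The caller needs no #ifdef: with the stub, this branch is dead code. */
	if (optimize_pages())
		printf("can optimize %u vmemmap pages\n", optimize_pages());
	else
		printf("feature compiled out: nothing to do\n");
	return 0;
}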
