
Commit f10f144

Muchun Song authored and akpm00 committed
mm: hugetlb_vmemmap: cleanup hugetlb_free_vmemmap_enabled*
The word "free" is not expressive enough to describe the feature of optimizing vmemmap pages associated with each HugeTLB page, so rename this keyword to "optimize". This patch cleans up the static key and hugetlb_free_vmemmap_enabled() to make the code more expressive.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Muchun Song <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Mike Kravetz <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 5981611 commit f10f144
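
In short, hugetlb_free_vmemmap_enabled_key becomes hugetlb_optimize_vmemmap_key and hugetlb_free_vmemmap_enabled() becomes hugetlb_optimize_vmemmap_enabled(); the CONFIG_HUGETLB_PAGE_FREE_VMEMMAP* config symbols are left untouched by this patch. A minimal sketch of a caller after the rename (hypothetical function, mirroring the arm64 change below):

static void example_flush(struct page *page)	/* hypothetical caller, not part of this commit */
{
	/*
	 * Resolve the real head page before testing per-page state, since
	 * vmemmap optimization can alias tail struct pages to the head.
	 */
	if (hugetlb_optimize_vmemmap_enabled() && PageHuge(page))
		page = compound_head(page);

	/* ... operate on the (possibly compound head) page ... */
}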

File tree

4 files changed: +13 −13 lines changed


arch/arm64/mm/flush.c

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ void flush_dcache_page(struct page *page)
 	 * is reused (more details can refer to the comments above
 	 * page_fixed_fake_head()).
 	 */
-	if (hugetlb_free_vmemmap_enabled() && PageHuge(page))
+	if (hugetlb_optimize_vmemmap_enabled() && PageHuge(page))
 		page = compound_head(page);
 
 	if (test_bit(PG_dcache_clean, &page->flags))

include/linux/page-flags.h

Lines changed: 6 additions & 6 deletions
@@ -192,16 +192,16 @@ enum pageflags {
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
-			 hugetlb_free_vmemmap_enabled_key);
+			 hugetlb_optimize_vmemmap_key);
 
-static __always_inline bool hugetlb_free_vmemmap_enabled(void)
+static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
 {
 	return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
-				   &hugetlb_free_vmemmap_enabled_key);
+				   &hugetlb_optimize_vmemmap_key);
 }
 
 /*
- * If the feature of freeing some vmemmap pages associated with each HugeTLB
+ * If the feature of optimizing vmemmap pages associated with each HugeTLB
  * page is enabled, the head vmemmap page frame is reused and all of the tail
  * vmemmap addresses map to the head vmemmap page frame (furture details can
  * refer to the figure at the head of the mm/hugetlb_vmemmap.c). In other
@@ -218,7 +218,7 @@ static __always_inline bool hugetlb_free_vmemmap_enabled(void)
  */
 static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
 {
-	if (!hugetlb_free_vmemmap_enabled())
+	if (!hugetlb_optimize_vmemmap_enabled())
 		return page;
 
 	/*
@@ -247,7 +247,7 @@ static inline const struct page *page_fixed_fake_head(const struct page *page)
 	return page;
 }
 
-static inline bool hugetlb_free_vmemmap_enabled(void)
+static inline bool hugetlb_optimize_vmemmap_enabled(void)
 {
 	return false;
 }
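
Because the !CONFIG_HUGETLB_PAGE_FREE_VMEMMAP stub of page_fixed_fake_head() above returns the page unchanged, and the real version bails out early when hugetlb_optimize_vmemmap_enabled() is false, callers can use it unconditionally. A minimal sketch (hypothetical helper, not part of this diff):

static __always_inline bool example_page_is_fake_head(const struct page *page)
{
	/*
	 * True only when vmemmap optimization is enabled and this tail
	 * struct page aliases the reused head vmemmap page frame.
	 */
	return page_fixed_fake_head(page) != page;
}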

mm/hugetlb_vmemmap.c

Lines changed: 5 additions & 5 deletions
@@ -189,8 +189,8 @@
 #define RESERVE_VMEMMAP_SIZE	(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 
 DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
-			hugetlb_free_vmemmap_enabled_key);
-EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);
+			hugetlb_optimize_vmemmap_key);
+EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 
 static int __init hugetlb_vmemmap_early_param(char *buf)
 {
@@ -204,9 +204,9 @@ static int __init hugetlb_vmemmap_early_param(char *buf)
 		return -EINVAL;
 
 	if (!strcmp(buf, "on"))
-		static_branch_enable(&hugetlb_free_vmemmap_enabled_key);
+		static_branch_enable(&hugetlb_optimize_vmemmap_key);
 	else if (!strcmp(buf, "off"))
-		static_branch_disable(&hugetlb_free_vmemmap_enabled_key);
+		static_branch_disable(&hugetlb_optimize_vmemmap_key);
 	else
 		return -EINVAL;
 
@@ -282,7 +282,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 	BUILD_BUG_ON(__NR_USED_SUBPAGE >=
 		     RESERVE_VMEMMAP_SIZE / sizeof(struct page));
 
-	if (!hugetlb_free_vmemmap_enabled())
+	if (!hugetlb_optimize_vmemmap_enabled())
 		return;
 
 	vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
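
To put hugetlb_vmemmap_init() above in concrete terms, a worked example (assuming common x86-64 defaults of 4 KiB base pages and a 64-byte struct page; these values are not taken from this diff):

/*
 * A 2 MiB HugeTLB page spans nr_pages = 512 base pages, so:
 *   vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT
 *                 = (512 * 64) >> 12
 *                 = 8
 * i.e. eight vmemmap pages back its struct pages; everything beyond the
 * reserved RESERVE_VMEMMAP_SIZE portion is a candidate for optimization.
 */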

mm/memory_hotplug.c

Lines changed: 1 addition & 1 deletion
@@ -1289,7 +1289,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size)
 	 * populate a single PMD.
 	 */
 	return memmap_on_memory &&
-	       !hugetlb_free_vmemmap_enabled() &&
+	       !hugetlb_optimize_vmemmap_enabled() &&
 	       IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
 	       size == memory_block_size_bytes() &&
 	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
