Commit 78ca0e6

kiryl authored and torvalds committed
thp: lazy huge zero page allocation
Instead of allocating the huge zero page in hugepage_init(), we can postpone it until the first huge zero page mapping. This saves memory if THP is not in use. cmpxchg() is used to avoid a race on huge_zero_pfn initialization.

Signed-off-by: Kirill A. Shutemov <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent: 8037195

File tree

1 file changed (+10 −10 lines)

mm/huge_memory.c

Lines changed: 10 additions & 10 deletions

@@ -160,22 +160,24 @@ static int start_khugepaged(void)
 	return err;
 }
 
-static int __init init_huge_zero_page(void)
+static int init_huge_zero_pfn(void)
 {
 	struct page *hpage;
+	unsigned long pfn;
 
 	hpage = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
 	if (!hpage)
 		return -ENOMEM;
-
-	huge_zero_pfn = page_to_pfn(hpage);
+	pfn = page_to_pfn(hpage);
+	if (cmpxchg(&huge_zero_pfn, 0, pfn))
+		__free_page(hpage);
 	return 0;
 }
 
 static inline bool is_huge_zero_pfn(unsigned long pfn)
 {
-	return pfn == huge_zero_pfn;
+	return huge_zero_pfn && pfn == huge_zero_pfn;
 }
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
@@ -564,10 +566,6 @@ static int __init hugepage_init(void)
 	if (err)
 		return err;
 
-	err = init_huge_zero_page();
-	if (err)
-		goto out;
-
 	err = khugepaged_slab_init();
 	if (err)
 		goto out;
@@ -590,8 +588,6 @@ static int __init hugepage_init(void)
 
 	return 0;
 out:
-	if (huge_zero_pfn)
-		__free_page(pfn_to_page(huge_zero_pfn));
 	hugepage_exit_sysfs(hugepage_kobj);
 	return err;
 }
@@ -735,6 +731,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		pgtable_t pgtable;
+		if (unlikely(!huge_zero_pfn && init_huge_zero_pfn())) {
+			count_vm_event(THP_FAULT_FALLBACK);
+			goto out;
+		}
 		pgtable = pte_alloc_one(mm, haddr);
 		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
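
For readers less familiar with the pattern, below is a minimal userspace sketch of the cmpxchg-based lazy initialization the commit message describes. It is not kernel code: the names (lazy_buf, init_lazy_buf, get_lazy_buf) are illustrative, and C11 atomic_compare_exchange_strong() stands in for the kernel's cmpxchg().

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Plays the role of huge_zero_pfn: NULL means "not initialized yet". */
static _Atomic(unsigned char *) lazy_buf;

static int init_lazy_buf(void)
{
	unsigned char *buf = calloc(1, 4096);	/* stand-in for the zeroed huge page */
	unsigned char *expected = NULL;

	if (!buf)
		return -1;			/* analogous to returning -ENOMEM */

	/* Publish our buffer only if nobody beat us to it (the cmpxchg step). */
	if (!atomic_compare_exchange_strong(&lazy_buf, &expected, buf))
		free(buf);			/* lost the race: drop the duplicate */
	return 0;
}

static unsigned char *get_lazy_buf(void)
{
	/* Fast path: already set up.  Slow path: allocate on first use. */
	if (!atomic_load(&lazy_buf) && init_lazy_buf())
		return NULL;			/* caller falls back, like THP_FAULT_FALLBACK */
	return atomic_load(&lazy_buf);
}

int main(void)
{
	unsigned char *p = get_lazy_buf();

	printf("lazy buffer %s\n", p ? "initialized on first use" : "unavailable");
	return 0;
}

As in init_huge_zero_pfn(), a thread that loses the race simply frees its duplicate allocation and still reports success, since the shared resource is available either way.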
