Commit 5918d10

kiryl authored and torvalds committed
thp: fix huge zero page logic for page with pfn == 0
The current implementation of the huge zero page uses pfn value 0 to indicate that the page hasn't been allocated yet. It assumes that the buddy page allocator can't return a page with pfn == 0.

Let's rework the code to store the 'struct page *' of the huge zero page, not its pfn. This way we can avoid the weak assumption.

[[email protected]: fix sparse warning]
Signed-off-by: Kirill A. Shutemov <[email protected]>
Reported-by: Minchan Kim <[email protected]>
Acked-by: Minchan Kim <[email protected]>
Reviewed-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Wu Fengguang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent fd0ccaf · commit 5918d10
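The fix replaces a magic-number sentinel (pfn 0 meaning "not allocated yet") with a pointer sentinel (NULL), while keeping the same lock-free publication scheme: racing callers may both allocate, one wins a compare-and-swap on the shared pointer, and the loser frees its copy. Below is a minimal userspace sketch of that publication step only, not the kernel code: C11 atomics stand in for the kernel's cmpxchg()/ACCESS_ONCE(), calloc() stands in for alloc_pages(), the names (zero_buf, get_zero_buf) are invented for illustration, and the refcount/shrinker machinery is omitted.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Lazily published shared buffer. NULL can never be a valid
 * allocation, so no extra "is it set yet?" guard is needed --
 * unlike a pfn, where 0 was merely assumed to be invalid. */
static _Atomic(void *) zero_buf;

static void *get_zero_buf(void)
{
        void *cur = atomic_load(&zero_buf);
        if (cur)                /* fast path: already published */
                return cur;

        void *fresh = calloc(1, 4096);
        if (!fresh)
                return NULL;    /* failure is also NULL; no ambiguity */

        void *expected = NULL;
        if (!atomic_compare_exchange_strong(&zero_buf, &expected, fresh)) {
                free(fresh);    /* lost the race; use the winner's buffer */
                return expected;
        }
        return fresh;
}

int main(void)
{
        void *a = get_zero_buf();
        void *b = get_zero_buf();
        printf("same buffer: %s\n", a == b ? "yes" : "no");
        return 0;
}

The design point is the one the commit message makes: NULL is a value a pointer can never legitimately hold, while pfn 0 was only assumed to be invalid by the buddy allocator.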

File tree

1 file changed: +22 −23

mm/huge_memory.c

Lines changed: 22 additions & 23 deletions
@@ -163,35 +163,34 @@ static int start_khugepaged(void)
 }
 
 static atomic_t huge_zero_refcount;
-static unsigned long huge_zero_pfn __read_mostly;
+static struct page *huge_zero_page __read_mostly;
 
-static inline bool is_huge_zero_pfn(unsigned long pfn)
+static inline bool is_huge_zero_page(struct page *page)
 {
-        unsigned long zero_pfn = ACCESS_ONCE(huge_zero_pfn);
-        return zero_pfn && pfn == zero_pfn;
+        return ACCESS_ONCE(huge_zero_page) == page;
 }
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-        return is_huge_zero_pfn(pmd_pfn(pmd));
+        return is_huge_zero_page(pmd_page(pmd));
 }
 
-static unsigned long get_huge_zero_page(void)
+static struct page *get_huge_zero_page(void)
 {
         struct page *zero_page;
 retry:
         if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
-                return ACCESS_ONCE(huge_zero_pfn);
+                return ACCESS_ONCE(huge_zero_page);
 
         zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                         HPAGE_PMD_ORDER);
         if (!zero_page) {
                 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
-                return 0;
+                return NULL;
         }
         count_vm_event(THP_ZERO_PAGE_ALLOC);
         preempt_disable();
-        if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
+        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                 preempt_enable();
                 __free_page(zero_page);
                 goto retry;
@@ -200,7 +199,7 @@ static unsigned long get_huge_zero_page(void)
         /* We take additional reference here. It will be put back by shrinker */
         atomic_set(&huge_zero_refcount, 2);
         preempt_enable();
-        return ACCESS_ONCE(huge_zero_pfn);
+        return ACCESS_ONCE(huge_zero_page);
 }
 
 static void put_huge_zero_page(void)
@@ -220,9 +219,9 @@ static int shrink_huge_zero_page(struct shrinker *shrink,
                 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
 
         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
-                unsigned long zero_pfn = xchg(&huge_zero_pfn, 0);
-                BUG_ON(zero_pfn == 0);
-                __free_page(__pfn_to_page(zero_pfn));
+                struct page *zero_page = xchg(&huge_zero_page, NULL);
+                BUG_ON(zero_page == NULL);
+                __free_page(zero_page);
         }
 
         return 0;
@@ -764,12 +763,12 @@ static inline struct page *alloc_hugepage(int defrag)
 
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
-                unsigned long zero_pfn)
+                struct page *zero_page)
 {
         pmd_t entry;
         if (!pmd_none(*pmd))
                 return false;
-        entry = pfn_pmd(zero_pfn, vma->vm_page_prot);
+        entry = mk_pmd(zero_page, vma->vm_page_prot);
         entry = pmd_wrprotect(entry);
         entry = pmd_mkhuge(entry);
         set_pmd_at(mm, haddr, pmd, entry);
@@ -794,20 +793,20 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if (!(flags & FAULT_FLAG_WRITE) &&
                         transparent_hugepage_use_zero_page()) {
                 pgtable_t pgtable;
-                unsigned long zero_pfn;
+                struct page *zero_page;
                 bool set;
                 pgtable = pte_alloc_one(mm, haddr);
                 if (unlikely(!pgtable))
                         return VM_FAULT_OOM;
-                zero_pfn = get_huge_zero_page();
-                if (unlikely(!zero_pfn)) {
+                zero_page = get_huge_zero_page();
+                if (unlikely(!zero_page)) {
                         pte_free(mm, pgtable);
                         count_vm_event(THP_FAULT_FALLBACK);
                         goto out;
                 }
                 spin_lock(&mm->page_table_lock);
                 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-                                zero_pfn);
+                                zero_page);
                 spin_unlock(&mm->page_table_lock);
                 if (!set) {
                         pte_free(mm, pgtable);
@@ -886,16 +885,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
          * a page table.
          */
         if (is_huge_zero_pmd(pmd)) {
-                unsigned long zero_pfn;
+                struct page *zero_page;
                 bool set;
                 /*
                  * get_huge_zero_page() will never allocate a new page here,
                  * since we already have a zero page to copy. It just takes a
                  * reference.
                  */
-                zero_pfn = get_huge_zero_page();
+                zero_page = get_huge_zero_page();
                 set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
-                                zero_pfn);
+                                zero_page);
                 BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
                 ret = 0;
                 goto out_unlock;
@@ -1812,7 +1811,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         struct anon_vma *anon_vma;
         int ret = 1;
 
-        BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
+        BUG_ON(is_huge_zero_page(page));
         BUG_ON(!PageAnon(page));
 
         /*
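For completeness, the reference-counting lifecycle that the first three hunks carry over unchanged can be sketched the same way. This is again a userspace analogue under the same assumptions, not the kernel code: C11 atomics replace the kernel's atomic_t helpers, the names cache_get, cache_put and cache_shrink are invented, and the kernel's preempt_disable() bracket around the publish step is omitted. The design it mirrors: the allocator leaves the count at 2, one reference for the caller and one held on behalf of the cache, so the shrinker may free the page exactly when a 1 -> 0 compare-and-swap proves the cache holds the last reference.

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) cached_buf;      /* NULL means "not allocated" */
static atomic_int refcount;             /* 0 means "freed or never allocated" */

static void *cache_get(void)
{
        for (;;) {
                /* Fast path, like the kernel's atomic_inc_not_zero():
                 * take a reference only if at least one is already held. */
                int old = atomic_load(&refcount);
                while (old != 0) {
                        if (atomic_compare_exchange_weak(&refcount, &old, old + 1))
                                return atomic_load(&cached_buf);
                }

                void *fresh = calloc(1, 4096);
                if (!fresh)
                        return NULL;    /* NULL is unambiguous, unlike pfn 0 */

                void *expected = NULL;
                if (!atomic_compare_exchange_strong(&cached_buf, &expected, fresh)) {
                        free(fresh);    /* another thread won the publish race */
                        continue;       /* the kernel's "goto retry" */
                }
                /* One reference for the caller, one kept for the cache,
                 * to be dropped later by the shrinker. */
                atomic_store(&refcount, 2);
                return fresh;
        }
}

static void cache_put(void)
{
        atomic_fetch_sub(&refcount, 1); /* never drops the cache's own ref */
}

/* Called under memory pressure: free the buffer only if the cache
 * holds the last reference (refcount == 1). */
static void cache_shrink(void)
{
        int one = 1;
        if (atomic_compare_exchange_strong(&refcount, &one, 0))
                free(atomic_exchange(&cached_buf, NULL));
}

int main(void)
{
        void *a = cache_get();  /* allocates; refcount becomes 2 */
        void *b = cache_get();  /* fast path; refcount becomes 3 */
        cache_put();
        cache_put();            /* back to 1: only the cache's reference */
        cache_shrink();         /* now reclaims the buffer */
        return a == b ? 0 : 1;
}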
