
Commit fac35ba

Baolin Wang authored and akpm00 committed
mm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb page
On some architectures (like ARM64), CONT-PTE/PMD size hugetlb pages can be supported, which means the architecture supports not only PMD/PUD size hugetlb pages (2M and 1G), but also CONT-PTE/PMD sizes (64K and 32M) if a 4K base page size is specified.

So when looking up a CONT-PTE size hugetlb page by follow_page(), it will use pte_offset_map_lock() to get the pte entry lock for the CONT-PTE size hugetlb in follow_page_pte(). However, this pte entry lock is incorrect for the CONT-PTE size hugetlb, since we should use huge_pte_lock() to get the correct lock, which is mm->page_table_lock.

That means the pte entry of the CONT-PTE size hugetlb under the current pte lock is unstable in follow_page_pte(): another path can still migrate or poison the pte entry of the CONT-PTE size hugetlb, which can cause potential race issues even while the 'pte lock' is held.

For example, suppose thread A is trying to look up a CONT-PTE size hugetlb page via the move_pages() syscall under the lock, while another thread B migrates the CONT-PTE hugetlb page at the same time. This causes thread A to get an incorrect page; if thread A then also tries to do page migration, a data inconsistency error occurs.

Moreover, we have the same issue for CONT-PMD size hugetlb in follow_huge_pmd().

To fix the above issues, rename follow_huge_pmd() to follow_huge_pmd_pte() so it handles both PMD and PTE level hugetlb sizes, and use huge_pte_lock() there to take the correct pte entry lock and make the pte entry stable.

Mike said:

Support for CONT_PMD/_PTE was added with bb9dd3d ("arm64: hugetlb: refactor find_num_contig()") in the patch series "Support for contiguous pte hugepages", v4. However, I do not believe these code paths were executed until migration support was added with 5480280 ("arm64/mm: enable HugeTLB migration for contiguous bit HugeTLB pages"). I would go with 5480280 for the Fixes: target.

Link: https://lkml.kernel.org/r/635f43bdd85ac2615a58405da82b4d33c6e5eb05.1662017562.git.baolin.wang@linux.alibaba.com
Fixes: 5480280 ("arm64/mm: enable HugeTLB migration for contiguous bit HugeTLB pages")
Signed-off-by: Baolin Wang <[email protected]>
Suggested-by: Mike Kravetz <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
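As an illustration of the lookup path in the race example above, the kernel side can be driven from userspace with move_pages(2) in query mode (nodes == NULL), which walks the page tables via follow_page(). The sketch below is hypothetical and not part of this commit; it assumes an arm64 system with a 4K base page size, 64K hugetlb pages pre-allocated in the pool, and libnuma for the move_pages() wrapper (link with -lnuma).

/*
 * Hypothetical sketch (not from this commit): map one 64K CONT-PTE
 * hugetlb page and ask the kernel which node it is on via
 * move_pages(2), which looks the page up through follow_page().
 */
#include <stdio.h>
#include <sys/mman.h>
#include <numaif.h>

#ifndef MAP_HUGE_64KB
#define MAP_HUGE_64KB	(16 << 26)	/* log2(64K) << MAP_HUGE_SHIFT */
#endif

int main(void)
{
	size_t len = 64 * 1024;
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
			  MAP_HUGE_64KB, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	*(volatile char *)addr = 1;	/* fault the hugetlb page in */

	void *pages[] = { addr };
	int status[1];

	/* nodes == NULL: query mode; the kernel looks each page up */
	if (move_pages(0, 1, pages, NULL, status, 0) == 0)
		printf("page resides on node %d\n", status[0]);
	else
		perror("move_pages");
	return 0;
}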
1 parent 1c8e234 commit fac35ba

File tree

3 files changed: +30 -19 lines changed


include/linux/hugetlb.h

Lines changed: 4 additions & 4 deletions
@@ -207,8 +207,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 struct page *follow_huge_pd(struct vm_area_struct *vma,
 			    unsigned long address, hugepd_t hpd,
 			    int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-			     pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+				 int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 			     pud_t *pud, int flags);
 struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -312,8 +312,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
 	return NULL;
 }
 
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
-					   unsigned long address, pmd_t *pmd, int flags)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+					       unsigned long address, int flags)
 {
 	return NULL;
 }

mm/gup.c

Lines changed: 13 additions & 1 deletion
@@ -530,6 +530,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
 			 (FOLL_PIN | FOLL_GET)))
 		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Considering PTE level hugetlb, like continuous-PTE hugetlb on
+	 * ARM64 architecture.
+	 */
+	if (is_vm_hugetlb_page(vma)) {
+		page = follow_huge_pmd_pte(vma, address, flags);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
+
 retry:
 	if (unlikely(pmd_bad(*pmd)))
 		return no_page_table(vma, flags);
@@ -662,7 +674,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	if (pmd_none(pmdval))
 		return no_page_table(vma, flags);
 	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
-		page = follow_huge_pmd(mm, address, pmd, flags);
+		page = follow_huge_pmd_pte(vma, address, flags);
 		if (page)
 			return page;
 		return no_page_table(vma, flags);

mm/hugetlb.c

Lines changed: 13 additions & 14 deletions
@@ -6946,12 +6946,13 @@ follow_huge_pd(struct vm_area_struct *vma,
 }
 
 struct page * __weak
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd, int flags)
+follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
 {
+	struct hstate *h = hstate_vma(vma);
+	struct mm_struct *mm = vma->vm_mm;
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	pte_t pte;
+	pte_t *ptep, pte;
 
 	/*
 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
@@ -6961,17 +6962,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		return NULL;
 
 retry:
-	ptl = pmd_lockptr(mm, pmd);
-	spin_lock(ptl);
-	/*
-	 * make sure that the address range covered by this pmd is not
-	 * unmapped from other threads.
-	 */
-	if (!pmd_huge(*pmd))
-		goto out;
-	pte = huge_ptep_get((pte_t *)pmd);
+	ptep = huge_pte_offset(mm, address, huge_page_size(h));
+	if (!ptep)
+		return NULL;
+
+	ptl = huge_pte_lock(h, mm, ptep);
+	pte = huge_ptep_get(ptep);
 	if (pte_present(pte)) {
-		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+		page = pte_page(pte) +
+			((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
 		/*
 		 * try_grab_page() should always succeed here, because: a) we
 		 * hold the pmd (ptl) lock, and b) we've just checked that the
@@ -6987,7 +6986,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	} else {
 		if (is_hugetlb_entry_migration(pte)) {
 			spin_unlock(ptl);
-			__migration_entry_wait_huge((pte_t *)pmd, ptl);
+			__migration_entry_wait_huge(ptep, ptl);
 			goto retry;
 		}
 		/*
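For context on why huge_pte_lock() is the right lock in the rewritten follow_huge_pmd_pte(): only an exactly PMD-size hugetlb entry uses the split PMD lock; every other size, including CONT-PTE (64K) and CONT-PMD (32M), falls back to mm->page_table_lock. Below is a simplified sketch of the helpers from include/linux/hugetlb.h around this kernel version, slightly abridged and not part of this commit.

/*
 * Simplified sketch of huge_pte_lockptr()/huge_pte_lock() (abridged,
 * not part of this commit).
 */
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	/* Only a true PMD-size hugetlb entry uses the split PMD lock. */
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	/* CONT-PTE/CONT-PMD (and PUD) sizes use the mm-wide lock. */
	return &mm->page_table_lock;
}

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(h, mm, pte);

	spin_lock(ptl);
	return ptl;
}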
