Commit bbff39c

mjkravetz authored and akpm00 committed
hugetlb: allocate vma lock for all sharable vmas
The hugetlb vma lock was originally designed to synchronize pmd sharing.  As such, it was only necessary to allocate the lock for vmas that were capable of pmd sharing.  Later in the development cycle, it was discovered that it could also be used to simplify fault/truncation races as described in [1].  However, a subsequent change to allocate the lock for all vmas that use the page cache was never made.  A fault/truncation race could leave pages in a file past i_size until the file is removed.

Remove the previous restriction and allocate the lock for all VM_MAYSHARE vmas.  Warn in the unlikely event of allocation failure.

[1] https://lore.kernel.org/lkml/Yxiv0SkMkZ0JWGGp@monkey/#t

Link: https://lkml.kernel.org/r/[email protected]
Fixes: "hugetlb: clean up code checking for fault/truncation races"
Signed-off-by: Mike Kravetz <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: "Aneesh Kumar K.V" <[email protected]>
Cc: Axel Rasmussen <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: James Houghton <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mina Almasry <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Pasha Tatashin <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Prakash Sangappa <[email protected]>
Cc: Sven Schnelle <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
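For context, the fault/truncation race mentioned above is an ordering problem: a page fault must not instantiate a page beyond i_size while truncation is removing pages, and the per-vma lock provides that ordering.  The sketch below is illustrative only; it assumes the hugetlb_vma_lock_read()/hugetlb_vma_lock_write() helpers from this series and a heavily simplified i_size check, and is not the kernel's actual fault or truncation code.

/*
 * Illustrative sketch only (not the kernel's actual code): how the
 * per-vma lock orders a hugetlb page fault against truncation.
 */
static vm_fault_t fault_path_sketch(struct vm_area_struct *vma,
                                    struct inode *inode, pgoff_t idx)
{
        vm_fault_t ret = 0;

        hugetlb_vma_lock_read(vma);     /* faults take the lock shared */
        if (idx >= i_size_read(inode) >> huge_page_shift(hstate_vma(vma))) {
                ret = VM_FAULT_SIGBUS;  /* fault lies beyond i_size */
                goto out;
        }
        /* ... instantiate the page and add it to the page cache ... */
out:
        hugetlb_vma_unlock_read(vma);
        return ret;
}

static void truncate_path_sketch(struct vm_area_struct *vma)
{
        hugetlb_vma_lock_write(vma);    /* truncation takes the lock exclusive */
        /* ... unmap and remove pages now beyond the new i_size ... */
        hugetlb_vma_unlock_write(vma);
}

Without the lock, truncation can finish removing pages while a racing fault adds one just past the new i_size, leaving "pages in a file past i_size until the file is removed" as described above.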
1 parent ecfbd73 commit bbff39c

File tree

1 file changed: +15, -35 lines changed

mm/hugetlb.c

Lines changed: 15 additions & 35 deletions
@@ -6687,10 +6687,11 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
         return saddr;
 }
 
-static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
-                                unsigned long start, unsigned long end,
-                                bool check_vma_lock)
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 {
+        unsigned long start = addr & PUD_MASK;
+        unsigned long end = start + PUD_SIZE;
+
 #ifdef CONFIG_USERFAULTFD
         if (uffd_disable_huge_pmd_share(vma))
                 return false;
@@ -6700,38 +6701,13 @@ static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
          */
         if (!(vma->vm_flags & VM_MAYSHARE))
                 return false;
-        if (check_vma_lock && !vma->vm_private_data)
+        if (!vma->vm_private_data) /* vma lock required for sharing */
                 return false;
         if (!range_in_vma(vma, start, end))
                 return false;
         return true;
 }
 
-static bool vma_pmd_shareable(struct vm_area_struct *vma)
-{
-        unsigned long start = ALIGN(vma->vm_start, PUD_SIZE),
-                      end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
-
-        if (start >= end)
-                return false;
-
-        return __vma_aligned_range_pmd_shareable(vma, start, end, false);
-}
-
-static bool vma_addr_pmd_shareable(struct vm_area_struct *vma,
-                                unsigned long addr)
-{
-        unsigned long start = addr & PUD_MASK;
-        unsigned long end = start + PUD_SIZE;
-
-        return __vma_aligned_range_pmd_shareable(vma, start, end, true);
-}
-
-bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
-{
-        return vma_addr_pmd_shareable(vma, addr);
-}
-
 /*
  * Determine if start,end range within vma could be mapped by shared pmd.
  * If yes, adjust start and end to cover range associated with possible
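For context (not part of this diff), the consolidated want_pmd_share() is consulted at fault time when page tables are allocated.  A simplified sketch of the call site, along the lines of huge_pte_alloc():

        /*
         * Simplified sketch of the caller (shown for context only): a
         * pud-level entry is shared only when want_pmd_share() confirms
         * the vma covers the whole PUD-sized range around addr and has
         * a vma lock.
         */
        if (want_pmd_share(vma, addr) && pud_none(*pud))
                pte = huge_pmd_share(mm, vma, addr, pud);
        else
                pte = (pte_t *)pmd_alloc(mm, pud, addr);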
@@ -6880,17 +6856,21 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
         if (vma->vm_private_data)
                 return;
 
-        /* Check size/alignment for pmd sharing possible */
-        if (!vma_pmd_shareable(vma))
-                return;
-
         vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
-        if (!vma_lock)
+        if (!vma_lock) {
                 /*
                  * If we can not allocate structure, then vma can not
-                 * participate in pmd sharing.
+                 * participate in pmd sharing.  This is only a possible
+                 * performance enhancement and memory saving issue.
+                 * However, the lock is also used to synchronize page
+                 * faults with truncation.  If the lock is not present,
+                 * unlikely races could leave pages in a file past i_size
+                 * until the file is removed.  Warn in the unlikely case of
+                 * allocation failure.
                  */
+                pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
                 return;
+        }
 
         kref_init(&vma_lock->refs);
         init_rwsem(&vma_lock->rw_sema);
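For reference, the object being initialized here is the per-vma lock itself; the layout below and the final attachment to vm_private_data come from the surrounding vma lock series rather than this diff, so treat them as assumed context:

/*
 * Assumed context (not part of this diff): the structure allocated in
 * hugetlb_vma_lock_alloc().  Field names follow the hugetlb vma lock
 * series.
 */
struct hugetlb_vma_lock {
        struct kref refs;               /* lifetime of the lock object */
        struct rw_semaphore rw_sema;    /* read: faults; write: truncation/unshare */
        struct vm_area_struct *vma;     /* back-pointer to the owning vma */
};

/* After kref_init()/init_rwsem(), the lock is attached to the vma: */
        vma_lock->vma = vma;
        vma->vm_private_data = vma_lock;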
