@@ -6687,10 +6687,11 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
 	return saddr;
 }
 
-static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end,
-				bool check_vma_lock)
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 {
+	unsigned long start = addr & PUD_MASK;
+	unsigned long end = start + PUD_SIZE;
+
 #ifdef CONFIG_USERFAULTFD
 	if (uffd_disable_huge_pmd_share(vma))
 		return false;
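The two added locals round the faulting address down to a PUD boundary and span exactly one PUD-sized region, the granularity at which hugetlb PMD page tables can be shared. A minimal userspace sketch of that arithmetic, assuming a 1 GiB PUD_SIZE (x86-64 with 4 KiB pages); in the kernel these constants come from the pgtable headers:

#include <stdio.h>

#define PUD_SIZE	(1UL << 30)		/* assumption: 1 GiB on x86-64 with 4 KiB pages */
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	unsigned long addr  = 0x7f1234567000UL;	/* hypothetical faulting address */
	unsigned long start = addr & PUD_MASK;	/* round down to PUD boundary */
	unsigned long end   = start + PUD_SIZE;	/* one PUD-sized, PUD-aligned range */

	printf("addr=%#lx start=%#lx end=%#lx\n", addr, start, end);
	return 0;
}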
@@ -6700,38 +6701,13 @@ static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
 	 */
 	if (!(vma->vm_flags & VM_MAYSHARE))
 		return false;
-	if (check_vma_lock && !vma->vm_private_data)
+	if (!vma->vm_private_data)	/* vma lock required for sharing */
 		return false;
 	if (!range_in_vma(vma, start, end))
 		return false;
 	return true;
 }
 
-static bool vma_pmd_shareable(struct vm_area_struct *vma)
-{
-	unsigned long start = ALIGN(vma->vm_start, PUD_SIZE),
-		      end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
-
-	if (start >= end)
-		return false;
-
-	return __vma_aligned_range_pmd_shareable(vma, start, end, false);
-}
-
-static bool vma_addr_pmd_shareable(struct vm_area_struct *vma,
-				   unsigned long addr)
-{
-	unsigned long start = addr & PUD_MASK;
-	unsigned long end = start + PUD_SIZE;
-
-	return __vma_aligned_range_pmd_shareable(vma, start, end, true);
-}
-
-bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma_addr_pmd_shareable(vma, addr);
-}
-
 /*
  * Determine if start,end range within vma could be mapped by shared pmd.
  * If yes, adjust start and end to cover range associated with possible
@@ -6880,17 +6856,21 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
 	if (vma->vm_private_data)
 		return;
 
-	/* Check size/alignment for pmd sharing possible */
-	if (!vma_pmd_shareable(vma))
-		return;
-
 	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
-	if (!vma_lock)
+	if (!vma_lock) {
 		/*
 		 * If we can not allocate structure, then vma can not
-		 * participate in pmd sharing.
+		 * participate in pmd sharing. This is only a possible
+		 * performance enhancement and memory saving issue.
+		 * However, the lock is also used to synchronize page
+		 * faults with truncation. If the lock is not present,
+		 * unlikely races could leave pages in a file past i_size
+		 * until the file is removed. Warn in the unlikely case of
+		 * allocation failure.
 		 */
+		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
 		return;
+	}
 
 	kref_init(&vma_lock->refs);
 	init_rwsem(&vma_lock->rw_sema);
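This hunk removes the vma_pmd_shareable() gate, so the per-vma lock is now allocated whenever hugetlb_vma_lock_alloc() gets this far, regardless of whether the vma is large and aligned enough for PMD sharing; per the new comment, the lock also synchronizes page faults with truncation, so an allocation failure is worth a warning rather than a silent return. A sketch of the resulting failure path, assembled from the context and added lines above:

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * See the full comment in the hunk above: without the lock
		 * the vma cannot share PMDs, and fault/truncation races
		 * become possible, so warn once and bail out.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);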