@@ -5918,19 +5918,18 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  * cannot race with other handlers or page migration.
  * Keep the pte_same checks anyway to make transition from the mutex easier.
  */
-static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
-		       unsigned long address, pte_t *ptep, unsigned int flags,
-		       struct folio *pagecache_folio, spinlock_t *ptl,
+static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
 		       struct vm_fault *vmf)
 {
-	const bool unshare = flags & FAULT_FLAG_UNSHARE;
-	pte_t pte = huge_ptep_get(ptep);
+	struct vm_area_struct *vma = vmf->vma;
+	struct mm_struct *mm = vma->vm_mm;
+	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
+	pte_t pte = huge_ptep_get(vmf->pte);
 	struct hstate *h = hstate_vma(vma);
 	struct folio *old_folio;
 	struct folio *new_folio;
 	int outside_reserve = 0;
 	vm_fault_t ret = 0;
-	unsigned long haddr = address & huge_page_mask(h);
 	struct mmu_notifier_range range;
 
 	/*
@@ -5953,7 +5952,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* Let's take out MAP_SHARED mappings first. */
 	if (vma->vm_flags & VM_MAYSHARE) {
-		set_huge_ptep_writable(vma, haddr, ptep);
+		set_huge_ptep_writable(vma, vmf->address, vmf->pte);
 		return 0;
 	}
 
@@ -5972,7 +5971,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 			SetPageAnonExclusive(&old_folio->page);
 		}
 		if (likely(!unshare))
-			set_huge_ptep_writable(vma, haddr, ptep);
+			set_huge_ptep_writable(vma, vmf->address, vmf->pte);
 
 		delayacct_wpcopy_end();
 		return 0;
@@ -5999,8 +5998,8 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Drop page table lock as buddy allocator may be called. It will
 	 * be acquired again before returning to the caller, as expected.
 	 */
-	spin_unlock(ptl);
-	new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
+	spin_unlock(vmf->ptl);
+	new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
 
 	if (IS_ERR(new_folio)) {
 		/*
@@ -6025,19 +6024,21 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		 *
 		 * Reacquire both after unmap operation.
 		 */
-		idx = vma_hugecache_offset(h, vma, haddr);
+		idx = vma_hugecache_offset(h, vma, vmf->address);
 		hash = hugetlb_fault_mutex_hash(mapping, idx);
 		hugetlb_vma_unlock_read(vma);
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-		unmap_ref_private(mm, vma, &old_folio->page, haddr);
+		unmap_ref_private(mm, vma, &old_folio->page,
+				vmf->address);
 
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		hugetlb_vma_lock_read(vma);
-		spin_lock(ptl);
-		ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
-		if (likely(ptep &&
-			   pte_same(huge_ptep_get(ptep), pte)))
+		spin_lock(vmf->ptl);
+		vmf->pte = hugetlb_walk(vma, vmf->address,
+				huge_page_size(h));
+		if (likely(vmf->pte &&
+			   pte_same(huge_ptep_get(vmf->pte), pte)))
 			goto retry_avoidcopy;
 		/*
 		 * race occurs while re-acquiring page table
@@ -6059,50 +6060,51 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(ret))
 		goto out_release_all;
 
-	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
+	if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE;
 		goto out_release_all;
 	}
 	__folio_mark_uptodate(new_folio);
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
-				haddr + huge_page_size(h));
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
+				vmf->address + huge_page_size(h));
 	mmu_notifier_invalidate_range_start(&range);
 
 	/*
 	 * Retake the page table lock to check for racing updates
 	 * before the page tables are altered
 	 */
-	spin_lock(ptl);
-	ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
-	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
+	spin_lock(vmf->ptl);
+	vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
+	if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
 		pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
 
 		/* Break COW or unshare */
-		huge_ptep_clear_flush(vma, haddr, ptep);
+		huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
 		hugetlb_remove_rmap(old_folio);
-		hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
+		hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
 		if (huge_pte_uffd_wp(pte))
 			newpte = huge_pte_mkuffd_wp(newpte);
-		set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
+		set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
+				huge_page_size(h));
 		folio_set_hugetlb_migratable(new_folio);
 		/* Make the old page be freed below */
 		new_folio = old_folio;
 	}
-	spin_unlock(ptl);
+	spin_unlock(vmf->ptl);
 	mmu_notifier_invalidate_range_end(&range);
 out_release_all:
 	/*
 	 * No restore in case of successful pagetable update (Break COW or
 	 * unshare)
 	 */
 	if (new_folio != old_folio)
-		restore_reserve_on_error(h, vma, haddr, new_folio);
+		restore_reserve_on_error(h, vma, vmf->address, new_folio);
 	folio_put(new_folio);
 out_release_old:
 	folio_put(old_folio);
 
-	spin_lock(ptl); /* Caller expects lock to be held */
+	spin_lock(vmf->ptl); /* Caller expects lock to be held */
 
 	delayacct_wpcopy_end();
 	return ret;
@@ -6369,8 +6371,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, vmf->real_address, vmf->pte,
-				 vmf->flags, folio, vmf->ptl, vmf);
+		ret = hugetlb_wp(folio, vmf);
 	}
 
 	spin_unlock(vmf->ptl);
@@ -6583,8 +6584,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(vmf.orig_pte)) {
-			ret = hugetlb_wp(mm, vma, address, vmf.pte, flags,
-					 pagecache_folio, vmf.ptl, &vmf);
+			ret = hugetlb_wp(pagecache_folio, &vmf);
 			goto out_put_page;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 			vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
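For readers less familiar with the pattern this patch applies, the sketch below is a stand-alone, hedged illustration only (it is not kernel code, and every toy_* name is hypothetical): the per-fault scalars that hugetlb_wp() used to take as separate parameters (mm, vma, address, flags, pte, ptl) instead travel inside one fault-descriptor struct, and the callee derives its locals from that struct, exactly as the diff shows with struct vm_fault.

/*
 * Illustrative sketch, not kernel code. It compiles as a plain user-space
 * program and only mirrors the shape of the refactor: a long scalar
 * parameter list collapsed into a single fault-context struct.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_FAULT_FLAG_UNSHARE	0x1	/* stand-in for FAULT_FLAG_UNSHARE */
#define TOY_VM_MAYSHARE		0x2	/* stand-in for VM_MAYSHARE */

struct toy_vma {
	unsigned long vm_flags;
};

/* Stand-in for struct vm_fault: all fault state in one place. */
struct toy_vm_fault {
	struct toy_vma *vma;	/* covering VMA */
	unsigned long address;	/* faulting address */
	unsigned int flags;	/* fault flags */
};

/* Old style: every piece of fault state is a separate parameter. */
static int toy_wp_old(struct toy_vma *vma, unsigned long address,
		      unsigned int flags)
{
	const bool unshare = flags & TOY_FAULT_FLAG_UNSHARE;

	(void)address;
	if (vma->vm_flags & TOY_VM_MAYSHARE)
		return 0;
	return unshare ? 1 : 2;
}

/* New style: the callee pulls what it needs out of the descriptor. */
static int toy_wp(struct toy_vm_fault *vmf)
{
	struct toy_vma *vma = vmf->vma;
	const bool unshare = vmf->flags & TOY_FAULT_FLAG_UNSHARE;

	if (vma->vm_flags & TOY_VM_MAYSHARE)
		return 0;
	return unshare ? 1 : 2;
}

int main(void)
{
	struct toy_vma vma = { .vm_flags = 0 };
	struct toy_vm_fault vmf = {
		.vma = &vma,
		.address = 0x200000,
		.flags = TOY_FAULT_FLAG_UNSHARE,
	};

	/* Both call styles compute the same result; only the plumbing differs. */
	printf("old=%d new=%d\n",
	       toy_wp_old(&vma, vmf.address, vmf.flags), toy_wp(&vmf));
	return 0;
}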