@@ -6188,23 +6188,19 @@ static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
 	return same;
 }
 
-static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
-			struct vm_area_struct *vma,
-			struct address_space *mapping, pgoff_t idx,
-			unsigned long address, pte_t *ptep,
-			pte_t old_pte, unsigned int flags,
+static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 			struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
+	struct mm_struct *mm = vma->vm_mm;
 	struct hstate *h = hstate_vma(vma);
 	vm_fault_t ret = VM_FAULT_SIGBUS;
 	int anon_rmap = 0;
 	unsigned long size;
 	struct folio *folio;
 	pte_t new_pte;
-	spinlock_t *ptl;
-	unsigned long haddr = address & huge_page_mask(h);
 	bool new_folio, new_pagecache_folio = false;
-	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
+	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -6223,10 +6219,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	 * before we get page_table_lock.
 	 */
 	new_folio = false;
-	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
+	folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
 	if (IS_ERR(folio)) {
 		size = i_size_read(mapping->host) >> huge_page_shift(h);
-		if (idx >= size)
+		if (vmf->pgoff >= size)
 			goto out;
 		/* Check for page in userfault range */
 		if (userfaultfd_missing(vma)) {
@@ -6247,7 +6243,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			 * never happen on the page after UFFDIO_COPY has
 			 * correctly installed the page and returned.
 			 */
-			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
+			if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
 				ret = 0;
 				goto out;
 			}
@@ -6262,7 +6258,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			goto out;
 		}
 
-		folio = alloc_hugetlb_folio(vma, haddr, 0);
+		folio = alloc_hugetlb_folio(vma, vmf->address, 0);
 		if (IS_ERR(folio)) {
 			/*
 			 * Returning error will result in faulting task being
@@ -6276,18 +6272,20 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			 * here. Before returning error, get ptl and make
 			 * sure there really is no pte entry.
 			 */
-			if (hugetlb_pte_stable(h, mm, ptep, old_pte))
+			if (hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte))
 				ret = vmf_error(PTR_ERR(folio));
 			else
 				ret = 0;
 			goto out;
 		}
-		clear_huge_page(&folio->page, address, pages_per_huge_page(h));
+		clear_huge_page(&folio->page, vmf->real_address,
+				pages_per_huge_page(h));
 		__folio_mark_uptodate(folio);
 		new_folio = true;
 
 		if (vma->vm_flags & VM_MAYSHARE) {
-			int err = hugetlb_add_to_page_cache(folio, mapping, idx);
+			int err = hugetlb_add_to_page_cache(folio, mapping,
+							vmf->pgoff);
 			if (err) {
 				/*
 				 * err can't be -EEXIST which implies someone
@@ -6296,7 +6294,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 				 * to the page cache. So it's safe to call
 				 * restore_reserve_on_error() here.
 				 */
-				restore_reserve_on_error(h, vma, haddr, folio);
+				restore_reserve_on_error(h, vma, vmf->address,
+							 folio);
 				folio_put(folio);
 				ret = VM_FAULT_SIGBUS;
 				goto out;
@@ -6323,7 +6322,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 		folio_unlock(folio);
 		folio_put(folio);
 		/* See comment in userfaultfd_missing() block above */
-		if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
+		if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
 			ret = 0;
 			goto out;
 		}
@@ -6338,23 +6337,23 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	 * any allocations necessary to record that reservation occur outside
 	 * the spinlock.
 	 */
-	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
-		if (vma_needs_reservation(h, vma, haddr) < 0) {
+	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+		if (vma_needs_reservation(h, vma, vmf->address) < 0) {
 			ret = VM_FAULT_OOM;
 			goto backout_unlocked;
 		}
 		/* Just decrements count, does not deallocate */
-		vma_end_reservation(h, vma, haddr);
+		vma_end_reservation(h, vma, vmf->address);
 	}
 
-	ptl = huge_pte_lock(h, mm, ptep);
+	vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
 	ret = 0;
 	/* If pte changed from under us, retry */
-	if (!pte_same(huge_ptep_get(ptep), old_pte))
+	if (!pte_same(huge_ptep_get(vmf->pte), vmf->orig_pte))
 		goto backout;
 
 	if (anon_rmap)
-		hugetlb_add_new_anon_rmap(folio, vma, haddr);
+		hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
 	else
 		hugetlb_add_file_rmap(folio);
 	new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
@@ -6363,17 +6362,18 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	 * If this pte was previously wr-protected, keep it wr-protected even
 	 * if populated.
 	 */
-	if (unlikely(pte_marker_uffd_wp(old_pte)))
+	if (unlikely(pte_marker_uffd_wp(vmf->orig_pte)))
 		new_pte = huge_pte_mkuffd_wp(new_pte);
-	set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
+	set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
 
 	hugetlb_count_add(pages_per_huge_page(h), mm);
-	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl, vmf);
+		ret = hugetlb_wp(mm, vma, vmf->real_address, vmf->pte,
+				 vmf->flags, folio, vmf->ptl, vmf);
 	}
 
-	spin_unlock(ptl);
+	spin_unlock(vmf->ptl);
 
 	/*
 	 * Only set hugetlb_migratable in newly allocated pages. Existing pages
@@ -6390,10 +6390,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	return ret;
 
 backout:
-	spin_unlock(ptl);
+	spin_unlock(vmf->ptl);
 backout_unlocked:
 	if (new_folio && !new_pagecache_folio)
-		restore_reserve_on_error(h, vma, haddr, folio);
+		restore_reserve_on_error(h, vma, vmf->address, folio);
 
 	folio_unlock(folio);
 	folio_put(folio);
@@ -6489,8 +6489,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * hugetlb_no_page will drop vma lock and hugetlb fault
 		 * mutex internally, which make us return immediately.
 		 */
-		return hugetlb_no_page(mm, vma, mapping, vmf.pgoff, address,
-				       vmf.pte, vmf.orig_pte, flags, &vmf);
+		return hugetlb_no_page(mapping, &vmf);
 	}
 
 	ret = 0;
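
Taken together, the hunks above collapse hugetlb_no_page()'s long scalar parameter list into a single struct vm_fault pointer: mm is now derived from vmf->vma->vm_mm, idx becomes vmf->pgoff, haddr becomes vmf->address (which, judging by the substitution, is expected to already be huge-page aligned at this point), the unaligned address becomes vmf->real_address, ptep/old_pte/flags become vmf->pte/vmf->orig_pte/vmf->flags, and the local ptl moves into vmf->ptl, so the call site in hugetlb_fault() shrinks to hugetlb_no_page(mapping, &vmf). The stand-alone sketch below only illustrates that refactoring pattern under made-up names (every demo_* identifier is hypothetical); it is not kernel code.

/*
 * Minimal sketch of the same refactor: pass one fault-context struct
 * instead of many scalars, and derive the remaining values inside the
 * callee.  All demo_* names are invented for illustration only.
 */
#include <stdio.h>

struct demo_vma { unsigned long vm_flags; };

struct demo_fault {
	struct demo_vma *vma;		/* plays the role of vmf->vma */
	unsigned long real_address;	/* unaligned faulting address */
	unsigned long address;		/* aligned address */
	unsigned long pgoff;		/* page-cache index, like vmf->pgoff */
	unsigned int flags;		/* fault flags, like vmf->flags */
};

/* Before: demo_no_page(vma, pgoff, address, flags, ...)
 * After:  everything travels through one pointer. */
static int demo_no_page(struct demo_fault *vmf)
{
	struct demo_vma *vma = vmf->vma;	/* mirrors vma = vmf->vma */

	printf("vma flags %#lx, pgoff %lu, addr %#lx (real %#lx)\n",
	       vma->vm_flags, vmf->pgoff, vmf->address, vmf->real_address);
	return 0;
}

int main(void)
{
	struct demo_vma vma = { .vm_flags = 0x8 };
	struct demo_fault vmf = {
		.vma = &vma,
		.real_address = 0x7f0000001234UL,
		.address = 0x7f0000000000UL,	/* aligned down for the demo */
		.pgoff = 0,
		.flags = 0x1,
	};

	return demo_no_page(&vmf);
}

Keeping the context in one struct matches what the rest of the fault path already does with struct vm_fault and means future changes to the fault state do not require touching every call site's signature.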