@@ -3996,7 +3996,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
-        struct page *page;
         struct folio *folio;
         vm_fault_t ret = 0;
         pte_t entry;
@@ -4051,19 +4050,18 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
         if (!folio)
                 goto oom;

-        page = &folio->page;
         if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
                 goto oom_free_page;
-        cgroup_throttle_swaprate(page, GFP_KERNEL);
+        cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);

         /*
-         * The memory barrier inside __SetPageUptodate makes sure that
+         * The memory barrier inside __folio_mark_uptodate makes sure that
          * preceding stores to the page contents become visible before
          * the set_pte_at() write.
          */
-        __SetPageUptodate(page);
+        __folio_mark_uptodate(folio);

-        entry = mk_pte(page, vma->vm_page_prot);
+        entry = mk_pte(&folio->page, vma->vm_page_prot);
         entry = pte_sw_mkyoung(entry);
         if (vma->vm_flags & VM_WRITE)
                 entry = pte_mkwrite(pte_mkdirty(entry));
@@ -4082,13 +4080,13 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
         /* Deliver the page fault to userland, check inside PT lock */
         if (userfaultfd_missing(vma)) {
                 pte_unmap_unlock(vmf->pte, vmf->ptl);
-                put_page(page);
+                folio_put(folio);
                 return handle_userfault(vmf, VM_UFFD_MISSING);
         }

         inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-        page_add_new_anon_rmap(page, vma, vmf->address);
-        lru_cache_add_inactive_or_unevictable(page, vma);
+        folio_add_new_anon_rmap(folio, vma, vmf->address);
+        folio_add_lru_vma(folio, vma);
 setpte:
         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);

@@ -4098,10 +4096,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
         pte_unmap_unlock(vmf->pte, vmf->ptl);
         return ret;
 release:
-        put_page(page);
+        folio_put(folio);
         goto unlock;
 oom_free_page:
-        put_page(page);
+        folio_put(folio);
 oom:
         return VM_FAULT_OOM;
 }
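For context, the hunks above swap the per-page calls in do_anonymous_page() for their folio equivalents: put_page() becomes folio_put(), __SetPageUptodate() becomes __folio_mark_uptodate(), page_add_new_anon_rmap() becomes folio_add_new_anon_rmap(), and lru_cache_add_inactive_or_unevictable() becomes folio_add_lru_vma(); &folio->page survives only where an interface that still takes a struct page is called (cgroup_throttle_swaprate(), mk_pte()). The sketch below condenses the post-patch charge-and-map sequence into one helper so the flow reads in order. It is an illustration only: the helper name map_new_anon_folio() is hypothetical, the logic actually lives inline in do_anonymous_page(), and the kernel-internal headers mean it builds only inside a kernel tree of roughly this vintage.

/*
 * Illustrative sketch only (hypothetical helper, not part of the patch):
 * the folio-based sequence do_anonymous_page() follows after this change.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/pagemap.h>
#include <linux/memcontrol.h>
#include <linux/rmap.h>
#include <linux/swap.h>

static vm_fault_t map_new_anon_folio(struct vm_fault *vmf, struct folio *folio)
{
        struct vm_area_struct *vma = vmf->vma;
        pte_t entry;

        /* Charge the freshly allocated folio to the memcg before mapping it. */
        if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
                return VM_FAULT_OOM;
        cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);

        /* Publish the folio contents before the PTE store below can be seen. */
        __folio_mark_uptodate(folio);

        entry = mk_pte(&folio->page, vma->vm_page_prot);
        entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));

        /* Account the anonymous mapping, add to rmap and LRU, then map it. */
        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
        folio_add_new_anon_rmap(folio, vma, vmf->address);
        folio_add_lru_vma(folio, vma);
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
        return 0;
}

On the error paths the reference taken at allocation is dropped with folio_put(folio), exactly as in the release and oom_free_page labels above.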