
Commit cb3184d

Matthew Wilcox (Oracle) authored and akpm00 committed
mm: convert do_anonymous_page() to use a folio
Removes six calls to compound_head(); some inline and some external.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 6bc56a4 commit cb3184d

1 file changed

mm/memory.c

Lines changed: 9 additions & 11 deletions
@@ -3996,7 +3996,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *page;
 	struct folio *folio;
 	vm_fault_t ret = 0;
 	pte_t entry;
@@ -4051,19 +4050,18 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (!folio)
 		goto oom;
 
-	page = &folio->page;
 	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto oom_free_page;
-	cgroup_throttle_swaprate(page, GFP_KERNEL);
+	cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * preceding stores to the page contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
-	entry = mk_pte(page, vma->vm_page_prot);
+	entry = mk_pte(&folio->page, vma->vm_page_prot);
 	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
@@ -4082,13 +4080,13 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		put_page(page);
+		folio_put(folio);
 		return handle_userfault(vmf, VM_UFFD_MISSING);
 	}
 
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, vmf->address);
-	lru_cache_add_inactive_or_unevictable(page, vma);
+	folio_add_new_anon_rmap(folio, vma, vmf->address);
+	folio_add_lru_vma(folio, vma);
 setpte:
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -4098,10 +4096,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
 release:
-	put_page(page);
+	folio_put(folio);
 	goto unlock;
 oom_free_page:
-	put_page(page);
+	folio_put(folio);
 oom:
 	return VM_FAULT_OOM;
 }
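For context on the commit message's claim: the compound_head() calls go away because the page-based helpers each have to resolve an arbitrary page to its head page on entry, whereas the folio-based replacements are handed the head directly. Below is a simplified sketch of that pattern, not the exact include/linux/mm.h definitions (the real put_page() also has devmap handling, for example); the sketch_* names are illustrative only.

/*
 * Simplified sketch: a page-based helper must first map the page to its
 * folio (the hidden compound_head() lookup), while the folio-based helper
 * skips that step because do_anonymous_page() now keeps the folio itself.
 */
static inline void sketch_put_page(struct page *page)
{
	struct folio *folio = page_folio(page);	/* compound_head() lookup */

	folio_put(folio);
}

static inline void sketch_folio_put(struct folio *folio)
{
	if (folio_put_testzero(folio))	/* drop a reference */
		__folio_put(folio);	/* free once the refcount hits zero */
}

Since the fault handler holds a struct folio from allocation to mapping, every former put_page()/__SetPageUptodate()/rmap call on that page no longer pays for the head-page lookup.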

Comments (0)