
Commit edf5047

Matthew Wilcox (Oracle) authored and akpm00 committed
mm: use a folio in copy_pte_range()
Allocate an order-0 folio instead of a page and pass it all the way down the call chain.  Removes dozens of calls to compound_head().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 28d41a4 commit edf5047
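The conversion is largely mechanical. As a rough sketch (not part of the commit; the helper name is illustrative and error handling is trimmed), the preallocation step after this change looks roughly like the following, using only the APIs that appear in the diff below:

	/* Sketch of the folio-based preallocation pattern; mirrors the new
	 * page_copy_prealloc() in the diff below.  The function name here is
	 * hypothetical. */
	static struct folio *copy_prealloc_sketch(struct mm_struct *src_mm,
						  struct vm_area_struct *vma,
						  unsigned long addr)
	{
		/* Order 0: a single page, but typed as a folio, so callers
		 * further down never need compound_head(). */
		struct folio *new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE,
						  0, vma, addr, false);

		if (!new_folio)
			return NULL;

		/* mem_cgroup_charge() takes a folio directly, so the old
		 * page_folio() conversion disappears. */
		if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
			folio_put(new_folio);	/* was put_page() */
			return NULL;
		}
		cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
		return new_folio;
	}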


mm/memory.c

Lines changed: 25 additions & 26 deletions
@@ -863,28 +863,28 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 static inline int
 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-		  struct page **prealloc, struct page *page)
+		  struct folio **prealloc, struct page *page)
 {
-	struct page *new_page;
+	struct folio *new_folio;
 	pte_t pte;
 
-	new_page = *prealloc;
-	if (!new_page)
+	new_folio = *prealloc;
+	if (!new_folio)
 		return -EAGAIN;
 
 	/*
 	 * We have a prealloc page, all good!  Take it
 	 * over and copy the page & arm it.
 	 */
 	*prealloc = NULL;
-	copy_user_highpage(new_page, page, addr, src_vma);
-	__SetPageUptodate(new_page);
-	page_add_new_anon_rmap(new_page, dst_vma, addr);
-	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
-	rss[mm_counter(new_page)]++;
+	copy_user_highpage(&new_folio->page, page, addr, src_vma);
+	__folio_mark_uptodate(new_folio);
+	folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+	folio_add_lru_vma(new_folio, dst_vma);
+	rss[MM_ANONPAGES]++;
 
 	/* All done, just insert the new page copy in the child */
-	pte = mk_pte(new_page, dst_vma->vm_page_prot);
+	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
 		/* Uffd-wp needs to be delivered to dest pte as well */
@@ -900,7 +900,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 static inline int
 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-		 struct page **prealloc)
+		 struct folio **prealloc)
 {
 	struct mm_struct *src_mm = src_vma->vm_mm;
 	unsigned long vm_flags = src_vma->vm_flags;
@@ -922,11 +922,11 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 			return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
 						 addr, rss, prealloc, page);
 		}
-		rss[mm_counter(page)]++;
+		rss[MM_ANONPAGES]++;
 	} else if (page) {
 		get_page(page);
 		page_dup_file_rmap(page, false);
-		rss[mm_counter(page)]++;
+		rss[mm_counter_file(page)]++;
 	}
 
 	/*
@@ -954,23 +954,22 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	return 0;
 }
 
-static inline struct page *
-page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
-		   unsigned long addr)
+static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
+		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct page *new_page;
+	struct folio *new_folio;
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
-	if (!new_page)
+	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	if (!new_folio)
 		return NULL;
 
-	if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
-		put_page(new_page);
+	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
+		folio_put(new_folio);
 		return NULL;
 	}
-	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+	cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
 
-	return new_page;
+	return new_folio;
 }
 
 static int
@@ -986,7 +985,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	int progress, ret = 0;
 	int rss[NR_MM_COUNTERS];
 	swp_entry_t entry = (swp_entry_t){0};
-	struct page *prealloc = NULL;
+	struct folio *prealloc = NULL;
 
 again:
 	progress = 0;
@@ -1056,7 +1055,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 			 * will allocate page according to address).  This
 			 * could only happen if one pinned pte changed.
 			 */
-			put_page(prealloc);
+			folio_put(prealloc);
 			prealloc = NULL;
 		}
 		progress += 8;
@@ -1093,7 +1092,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		goto again;
 out:
 	if (unlikely(prealloc))
-		put_page(prealloc);
+		folio_put(prealloc);
 	return ret;
 }
 