@@ -863,28 +863,28 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 static inline int
 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-		  struct page **prealloc, struct page *page)
+		  struct folio **prealloc, struct page *page)
 {
-	struct page *new_page;
+	struct folio *new_folio;
 	pte_t pte;
 
-	new_page = *prealloc;
-	if (!new_page)
+	new_folio = *prealloc;
+	if (!new_folio)
 		return -EAGAIN;
 
 	/*
 	 * We have a prealloc page, all good!  Take it
 	 * over and copy the page & arm it.
 	 */
 	*prealloc = NULL;
-	copy_user_highpage(new_page, page, addr, src_vma);
-	__SetPageUptodate(new_page);
-	page_add_new_anon_rmap(new_page, dst_vma, addr);
-	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
-	rss[mm_counter(new_page)]++;
+	copy_user_highpage(&new_folio->page, page, addr, src_vma);
+	__folio_mark_uptodate(new_folio);
+	folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+	folio_add_lru_vma(new_folio, dst_vma);
+	rss[MM_ANONPAGES]++;
 
 	/* All done, just insert the new page copy in the child */
-	pte = mk_pte(new_page, dst_vma->vm_page_prot);
+	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
 		/* Uffd-wp needs to be delivered to dest pte as well */
@@ -900,7 +900,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 static inline int
 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-		 struct page **prealloc)
+		 struct folio **prealloc)
 {
 	struct mm_struct *src_mm = src_vma->vm_mm;
 	unsigned long vm_flags = src_vma->vm_flags;
@@ -922,11 +922,11 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 			return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
 						 addr, rss, prealloc, page);
 		}
-		rss[mm_counter(page)]++;
+		rss[MM_ANONPAGES]++;
 	} else if (page) {
 		get_page(page);
 		page_dup_file_rmap(page, false);
-		rss[mm_counter(page)]++;
+		rss[mm_counter_file(page)]++;
 	}
 
 	/*
@@ -954,23 +954,22 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	return 0;
 }
 
-static inline struct page *
-page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
-		   unsigned long addr)
+static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
+		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct page *new_page;
+	struct folio *new_folio;
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
-	if (!new_page)
+	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	if (!new_folio)
 		return NULL;
 
-	if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
-		put_page(new_page);
+	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
+		folio_put(new_folio);
 		return NULL;
 	}
-	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+	cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
 
-	return new_page;
+	return new_folio;
 }
 
 static int
@@ -986,7 +985,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	int progress, ret = 0;
 	int rss[NR_MM_COUNTERS];
 	swp_entry_t entry = (swp_entry_t){0};
-	struct page *prealloc = NULL;
+	struct folio *prealloc = NULL;
 
 again:
 	progress = 0;
@@ -1056,7 +1055,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 			 * will allocate page according to address).  This
 			 * could only happen if one pinned pte changed.
 			 */
-			put_page(prealloc);
+			folio_put(prealloc);
 			prealloc = NULL;
 		}
 		progress += 8;
@@ -1093,7 +1092,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		goto again;
 out:
 	if (unlikely(prealloc))
-		put_page(prealloc);
+		folio_put(prealloc);
 	return ret;
 }
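
The conversion above repeatedly swaps page helpers for their folio counterparts (put_page() becomes folio_put(), alloc_page_vma() becomes vma_alloc_folio()) while still passing &new_folio->page to helpers such as copy_user_highpage() and cgroup_throttle_swaprate() that have not been converted yet. The toy C sketch below uses mock types and functions, not the kernel's real definitions, to illustrate why that works: a folio's first union member overlays its head struct page, so &folio->page recovers a page pointer for the legacy interfaces.

/*
 * Toy model (not kernel code): mock struct page / struct folio showing how
 * folio-based calls can still hand &folio->page to page-based helpers.
 */
#include <stdio.h>
#include <stdlib.h>

struct page {			/* stand-in for the kernel's struct page */
	int refcount;
};

struct folio {			/* stand-in: head page embedded as first member */
	struct page page;
};

/* mock put_page(): drop one reference, free at zero */
static void put_page(struct page *page)
{
	if (--page->refcount == 0)
		free(page);	/* &folio->page has the same address as the folio */
}

/* mock folio_put(): folio API funnels into the page API */
static void folio_put(struct folio *folio)
{
	put_page(&folio->page);
}

/* mock allocation, loosely mirroring vma_alloc_folio() returning one folio */
static struct folio *alloc_one_folio(void)
{
	struct folio *folio = malloc(sizeof(*folio));

	if (folio)
		folio->page.refcount = 1;
	return folio;
}

int main(void)
{
	struct folio *prealloc = alloc_one_folio();

	if (!prealloc)
		return 1;
	/* A page-based helper can still be handed &prealloc->page. */
	printf("refcount via page view: %d\n", (&prealloc->page)->refcount);
	folio_put(prealloc);	/* mirrors the folio_put() calls in the diff */
	return 0;
}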