@@ -4719,23 +4719,24 @@ hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr
 }
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-			    struct vm_area_struct *vma)
+			    struct vm_area_struct *dst_vma,
+			    struct vm_area_struct *src_vma)
 {
 	pte_t *src_pte, *dst_pte, entry, dst_entry;
 	struct page *ptepage;
 	unsigned long addr;
-	bool cow = is_cow_mapping(vma->vm_flags);
-	struct hstate *h = hstate_vma(vma);
+	bool cow = is_cow_mapping(src_vma->vm_flags);
+	struct hstate *h = hstate_vma(src_vma);
 	unsigned long sz = huge_page_size(h);
 	unsigned long npages = pages_per_huge_page(h);
-	struct address_space *mapping = vma->vm_file->f_mapping;
+	struct address_space *mapping = src_vma->vm_file->f_mapping;
 	struct mmu_notifier_range range;
 	int ret = 0;
 
 	if (cow) {
-		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
-					vma->vm_start,
-					vma->vm_end);
+		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src,
+					src_vma->vm_start,
+					src_vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
 		mmap_assert_write_locked(src);
 		raw_write_seqcount_begin(&src->write_protect_seq);
@@ -4749,12 +4750,12 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		i_mmap_lock_read(mapping);
 	}
 
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
+	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
 		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
-		dst_pte = huge_pte_alloc(dst, vma, addr, sz);
+		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
 		if (!dst_pte) {
 			ret = -ENOMEM;
 			break;
@@ -4789,6 +4790,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
 				    is_hugetlb_entry_hwpoisoned(entry))) {
 			swp_entry_t swp_entry = pte_to_swp_entry(entry);
+			bool uffd_wp = huge_pte_uffd_wp(entry);
 
 			if (!is_readable_migration_entry(swp_entry) && cow) {
 				/*
@@ -4798,10 +4800,21 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				swp_entry = make_readable_migration_entry(
 							swp_offset(swp_entry));
 				entry = swp_entry_to_pte(swp_entry);
+				if (userfaultfd_wp(src_vma) && uffd_wp)
+					entry = huge_pte_mkuffd_wp(entry);
 				set_huge_swap_pte_at(src, addr, src_pte,
 						     entry, sz);
 			}
+			if (!userfaultfd_wp(dst_vma) && uffd_wp)
+				entry = huge_pte_clear_uffd_wp(entry);
 			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
+		} else if (unlikely(is_pte_marker(entry))) {
+			/*
+			 * We copy the pte marker only if the dst vma has
+			 * uffd-wp enabled.
+			 */
+			if (userfaultfd_wp(dst_vma))
+				set_huge_pte_at(dst, addr, dst_pte, entry);
 		} else {
 			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
@@ -4819,20 +4832,21 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		 */
 		if (!PageAnon(ptepage)) {
 			page_dup_file_rmap(ptepage, true);
-		} else if (page_try_dup_anon_rmap(ptepage, true, vma)) {
+		} else if (page_try_dup_anon_rmap(ptepage, true,
+						  src_vma)) {
 			pte_t src_pte_old = entry;
 			struct page *new;
 
 			spin_unlock(src_ptl);
 			spin_unlock(dst_ptl);
 			/* Do not use reserve as it's private owned */
-			new = alloc_huge_page(vma, addr, 1);
+			new = alloc_huge_page(dst_vma, addr, 1);
 			if (IS_ERR(new)) {
 				put_page(ptepage);
 				ret = PTR_ERR(new);
 				break;
 			}
-			copy_user_huge_page(new, ptepage, addr, vma,
+			copy_user_huge_page(new, ptepage, addr, dst_vma,
 					    npages);
 			put_page(ptepage);
@@ -4842,13 +4856,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 			entry = huge_ptep_get(src_pte);
 			if (!pte_same(src_pte_old, entry)) {
-				restore_reserve_on_error(h, vma, addr,
+				restore_reserve_on_error(h, dst_vma, addr,
 							 new);
 				put_page(new);
 				/* dst_entry won't change as in child */
 				goto again;
 			}
-			hugetlb_install_page(vma, dst_pte, addr, new);
+			hugetlb_install_page(dst_vma, dst_pte, addr, new);
 			spin_unlock(src_ptl);
 			spin_unlock(dst_ptl);
 			continue;
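Note: the following is a minimal userspace sketch, not part of this commit, of the behavior the uffd-wp hunks implement. It assumes a kernel with hugetlb uffd-wp support (which this series adds), a 2 MB hugetlb page size, and available hugetlb pages; the mapping size and write values are illustrative. A parent write-protects a private hugetlb mapping through userfaultfd and forks without UFFD_FEATURE_EVENT_FORK, so the child's VMA is not attached to the uffd context: userfaultfd_wp(dst_vma) is false in copy_hugetlb_page_range() above, and the uffd-wp bit is cleared in the child's copy of the PTE instead of being carried over.

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;		/* assumes one 2 MB hugetlb page */
	long uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = { 0 };
	struct uffdio_writeprotect wp = { 0 };
	char *area;

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		exit(1);

	area = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (area == MAP_FAILED)
		exit(1);
	memset(area, 1, len);		/* fault the huge page in first */

	/* Register the range for write-protect tracking. */
	reg.range.start = (unsigned long)area;
	reg.range.len = len;
	reg.mode = UFFDIO_REGISTER_MODE_WP;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		exit(1);

	/* Set the uffd-wp bit on the hugetlb PTE. */
	wp.range = reg.range;
	wp.mode = UFFDIO_WRITEPROTECT_MODE_WP;
	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp))
		exit(1);

	/*
	 * No UFFD_FEATURE_EVENT_FORK was requested, so the child's VMA
	 * is detached from the uffd context. In the fork path,
	 * copy_hugetlb_page_range() sees userfaultfd_wp(dst_vma) ==
	 * false and clears the uffd-wp bit in the child's PTE copy.
	 */
	if (fork() == 0) {
		area[0] = 2;	/* no uffd-wp fault in the child */
		_exit(0);
	}
	wait(NULL);
	return 0;
}

This is also why the function now takes dst_vma and src_vma separately: the parent and child VMAs can disagree about uffd-wp registration, so the source VMA drives what is copied while the destination VMA decides whether uffd-wp bits and pte markers are kept or dropped.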