Skip to content

Commit ab6e3d0

Browse files
Naoya Horiguchi authored and torvalds committed
mm: soft-dirty: keep soft-dirty bits over thp migration
The soft-dirty bit is designed to be kept tracked over page migration. This patch makes it work in the same manner for THP migration too. Signed-off-by: Naoya Horiguchi <[email protected]> Signed-off-by: Zi Yan <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Anshuman Khandual <[email protected]> Cc: Dave Hansen <[email protected]> Cc: David Nellans <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Kirill A. Shutemov <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Michal Hocko <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 84c3fc4 commit ab6e3d0

File tree

5 files changed

+92
-15
lines changed

5 files changed

+92
-15
lines changed

arch/x86/include/asm/pgtable.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1172,6 +1172,23 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
11721172
{
11731173
return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
11741174
}
1175+
1176+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
/*
 * Soft-dirty accessors for non-present (swap/migration) PMDs.  The bit is
 * stored in _PAGE_SWP_SOFT_DIRTY within the swap encoding, mirroring the
 * pte_swp_* helpers above.
 */
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
11751192
#endif
11761193

11771194
#define PKRU_AD_BIT 0x1

fs/proc/task_mmu.c

Lines changed: 16 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -978,17 +978,22 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
978978
{
979979
pmd_t pmd = *pmdp;
980980

981-
/* See comment in change_huge_pmd() */
982-
pmdp_invalidate(vma, addr, pmdp);
983-
if (pmd_dirty(*pmdp))
984-
pmd = pmd_mkdirty(pmd);
985-
if (pmd_young(*pmdp))
986-
pmd = pmd_mkyoung(pmd);
987-
988-
pmd = pmd_wrprotect(pmd);
989-
pmd = pmd_clear_soft_dirty(pmd);
990-
991-
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
981+
if (pmd_present(pmd)) {
982+
/* See comment in change_huge_pmd() */
983+
pmdp_invalidate(vma, addr, pmdp);
984+
if (pmd_dirty(*pmdp))
985+
pmd = pmd_mkdirty(pmd);
986+
if (pmd_young(*pmdp))
987+
pmd = pmd_mkyoung(pmd);
988+
989+
pmd = pmd_wrprotect(pmd);
990+
pmd = pmd_clear_soft_dirty(pmd);
991+
992+
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
993+
} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
994+
pmd = pmd_swp_clear_soft_dirty(pmd);
995+
set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
996+
}
992997
}
993998
#else
994999
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,

include/asm-generic/pgtable.h

Lines changed: 33 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -630,7 +630,24 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
630630
#define arch_start_context_switch(prev) do {} while (0)
631631
#endif
632632

633-
#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
633+
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
634+
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
635+
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
636+
{
637+
return pmd;
638+
}
639+
640+
static inline int pmd_swp_soft_dirty(pmd_t pmd)
641+
{
642+
return 0;
643+
}
644+
645+
static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
646+
{
647+
return pmd;
648+
}
649+
#endif
650+
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
634651
static inline int pte_soft_dirty(pte_t pte)
635652
{
636653
return 0;
@@ -675,6 +692,21 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
675692
{
676693
return pte;
677694
}
695+
696+
/*
 * CONFIG_HAVE_ARCH_SOFT_DIRTY is off: soft-dirty tracking does not
 * exist at all, so the swap-PMD soft-dirty helpers are no-ops that
 * match the pte stubs above.
 */
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
678710
#endif
679711

680712
#ifndef __HAVE_PFNMAP_TRACKING

include/linux/swapops.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -183,6 +183,8 @@ static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
183183
{
184184
swp_entry_t arch_entry;
185185

186+
if (pmd_swp_soft_dirty(pmd))
187+
pmd = pmd_swp_clear_soft_dirty(pmd);
186188
arch_entry = __pmd_to_swp_entry(pmd);
187189
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
188190
}

mm/huge_memory.c

Lines changed: 24 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -937,6 +937,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
937937
if (is_write_migration_entry(entry)) {
938938
make_migration_entry_read(&entry);
939939
pmd = swp_entry_to_pmd(entry);
940+
if (pmd_swp_soft_dirty(*src_pmd))
941+
pmd = pmd_swp_mksoft_dirty(pmd);
940942
set_pmd_at(src_mm, addr, src_pmd, pmd);
941943
}
942944
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
@@ -1756,6 +1758,17 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
17561758
}
17571759
#endif
17581760

1761+
/*
 * Mark a PMD soft-dirty when it is moved (used by move_huge_pmd below).
 * A PMD migration entry carries the bit in its swap encoding, a present
 * PMD in its normal page-table bits; with CONFIG_MEM_SOFT_DIRTY off the
 * PMD passes through untouched.
 */
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}
1771+
17591772
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
17601773
unsigned long new_addr, unsigned long old_end,
17611774
pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
@@ -1798,7 +1811,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
17981811
pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
17991812
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
18001813
}
1801-
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1814+
pmd = move_soft_dirty_pmd(pmd);
1815+
set_pmd_at(mm, new_addr, new_pmd, pmd);
18021816
if (new_ptl != old_ptl)
18031817
spin_unlock(new_ptl);
18041818
if (force_flush)
@@ -1846,6 +1860,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
18461860
*/
18471861
make_migration_entry_read(&entry);
18481862
newpmd = swp_entry_to_pmd(entry);
1863+
if (pmd_swp_soft_dirty(*pmd))
1864+
newpmd = pmd_swp_mksoft_dirty(newpmd);
18491865
set_pmd_at(mm, addr, pmd, newpmd);
18501866
}
18511867
goto unlock;
@@ -2824,6 +2840,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
28242840
unsigned long address = pvmw->address;
28252841
pmd_t pmdval;
28262842
swp_entry_t entry;
2843+
pmd_t pmdswp;
28272844

28282845
if (!(pvmw->pmd && !pvmw->pte))
28292846
return;
@@ -2837,8 +2854,10 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
28372854
if (pmd_dirty(pmdval))
28382855
set_page_dirty(page);
28392856
entry = make_migration_entry(page, pmd_write(pmdval));
2840-
pmdval = swp_entry_to_pmd(entry);
2841-
set_pmd_at(mm, address, pvmw->pmd, pmdval);
2857+
pmdswp = swp_entry_to_pmd(entry);
2858+
if (pmd_soft_dirty(pmdval))
2859+
pmdswp = pmd_swp_mksoft_dirty(pmdswp);
2860+
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
28422861
page_remove_rmap(page, true);
28432862
put_page(page);
28442863

@@ -2861,6 +2880,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
28612880
entry = pmd_to_swp_entry(*pvmw->pmd);
28622881
get_page(new);
28632882
pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
2883+
if (pmd_swp_soft_dirty(*pvmw->pmd))
2884+
pmde = pmd_mksoft_dirty(pmde);
28642885
if (is_write_migration_entry(entry))
28652886
pmde = maybe_pmd_mkwrite(pmde, vma);
28662887

0 commit comments

Comments
 (0)