
Commit 369ea82

Jérôme Glisse authored and Linus Torvalds committed
mm/rmap: update to new mmu_notifier semantic v2
Replace all mmu_notifier_invalidate_page() calls by *_invalidate_range()
and make sure it is bracketed by calls to *_invalidate_range_start()/end().

Note that because we cannot presume the pmd value or pte value we have to
assume the worst and unconditionally report an invalidation as happening.

Changed since v2:
  - try_to_unmap_one() only one call to mmu_notifier_invalidate_range()
  - compute end with PAGE_SIZE << compound_order(page)
  - fix PageHuge() case in try_to_unmap_one()

Signed-off-by: Jérôme Glisse <[email protected]>
Reviewed-by: Andrea Arcangeli <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Ross Zwisler <[email protected]>
Cc: Bernhard Held <[email protected]>
Cc: Adam Borowski <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Takashi Iwai <[email protected]>
Cc: Nadav Amit <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: axie <[email protected]>
Cc: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent a4d1a88 commit 369ea82
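
Editor's note: the new semantic, in miniature, is a bracketing pattern — compute a conservative range up front, open it with *_invalidate_range_start(), report each actual invalidation with *_invalidate_range() as ptes/pmds are cleared, and close with *_invalidate_range_end(). Below is a minimal sketch of that pattern, assuming the mmu_notifier calls of this kernel generation (taking mm, start, end) and eliding the page_vma_mapped_walk() loop; example_invalidate() is a hypothetical helper for illustration, not a function in mm/rmap.c.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Sketch only: the shape of the bracketing introduced by this commit,
 * simplified from page_mkclean_one() with the pte/pmd handling elided.
 */
static void example_invalidate(struct vm_area_struct *vma,
			       struct page *page, unsigned long address)
{
	unsigned long start = address, end;

	/*
	 * Assume the worst case (a pmd mapping): cover the whole
	 * compound page, clamped to the end of the vma.
	 */
	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));

	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);

	/* ... clear or write-protect the ptes/pmds mapping the page ... */

	/* Report what was actually invalidated for each cleared mapping. */
	mmu_notifier_invalidate_range(vma->vm_mm, start, end);

	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
}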

File tree: 1 file changed, +32 −3 lines

mm/rmap.c

Lines changed: 32 additions & 3 deletions
@@ -887,11 +887,21 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		.address = address,
 		.flags = PVMW_SYNC,
 	};
+	unsigned long start = address, end;
 	int *cleaned = arg;
 
+	/*
+	 * We have to assume the worse case ie pmd for invalidation. Note that
+	 * the page can not be free from this function.
+	 */
+	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
 	while (page_vma_mapped_walk(&pvmw)) {
+		unsigned long cstart, cend;
 		int ret = 0;
-		address = pvmw.address;
+
+		cstart = address = pvmw.address;
 		if (pvmw.pte) {
 			pte_t entry;
 			pte_t *pte = pvmw.pte;
@@ -904,6 +914,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			entry = pte_wrprotect(entry);
 			entry = pte_mkclean(entry);
 			set_pte_at(vma->vm_mm, address, pte, entry);
+			cend = cstart + PAGE_SIZE;
 			ret = 1;
 		} else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -918,6 +929,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			entry = pmd_wrprotect(entry);
 			entry = pmd_mkclean(entry);
 			set_pmd_at(vma->vm_mm, address, pmd, entry);
+			cstart &= PMD_MASK;
+			cend = cstart + PMD_SIZE;
 			ret = 1;
 #else
 			/* unexpected pmd-mapped page? */
@@ -926,11 +939,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		}
 
 		if (ret) {
-			mmu_notifier_invalidate_page(vma->vm_mm, address);
+			mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
 			(*cleaned)++;
 		}
 	}
 
+	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
 	return true;
 }
 
@@ -1324,6 +1339,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	pte_t pteval;
 	struct page *subpage;
 	bool ret = true;
+	unsigned long start = address, end;
 	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
@@ -1335,6 +1351,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				flags & TTU_MIGRATION, page);
 	}
 
+	/*
+	 * We have to assume the worse case ie pmd for invalidation. Note that
+	 * the page can not be free in this function as call of try_to_unmap()
+	 * must hold a reference on the page.
+	 */
+	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
 	while (page_vma_mapped_walk(&pvmw)) {
 		/*
 		 * If the page is mlock()d, we cannot swap it out.
@@ -1445,6 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
 				WARN_ON_ONCE(1);
 				ret = false;
+				/* We have to invalidate as we cleared the pte */
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
@@ -1490,8 +1515,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 discard:
 		page_remove_rmap(subpage, PageHuge(page));
 		put_page(page);
-		mmu_notifier_invalidate_page(mm, address);
+		mmu_notifier_invalidate_range(mm, address,
+					      address + PAGE_SIZE);
 	}
+
+	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
 	return ret;
 }
 

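Editor's note on scale: PAGE_SIZE << compound_order(page) is 4 KiB for a normal page (order 0) and 2 MiB for a transparent huge page (order 9, assuming x86-64 with 4 KiB base pages), and the min() clamp keeps the range from spilling past the vma. A stand-alone illustration of that clamp with assumed values (not kernel code):

#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration: x86-64, 4 KiB base pages. */
	unsigned long page_size = 4096;
	unsigned long vm_end = 0x400000;	/* hypothetical vma end */
	unsigned long start  = 0x3ff000;	/* last 4 KiB page of the vma */
	unsigned int  order  = 9;		/* 2 MiB THP: compound_order = 9 */

	unsigned long end = start + (page_size << order);
	if (end > vm_end)			/* min(vma->vm_end, ...) */
		end = vm_end;

	/* Prints: invalidate [0x3ff000, 0x400000) -- clamped to the vma */
	printf("invalidate [%#lx, %#lx)\n", start, end);
	return 0;
}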