Skip to content

Commit a8fa41a

Browse files
kiryl authored and torvalds committed
mm, rmap: check all VMAs that PTE-mapped THP can be part of
Current rmap code can miss a VMA that maps PTE-mapped THP if the first suppage of the THP was unmapped from the VMA. We need to walk rmap for the whole range of offsets that THP covers, not only the first one. vma_address() also need to be corrected to check the range instead of the first subpage. Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Kirill A. Shutemov <[email protected]> Acked-by: Hillf Danton <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Srikar Dronamraju <[email protected]> Cc: Vladimir Davydov <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 699fa21 commit a8fa41a

File tree

2 files changed

+16
-9
lines changed

2 files changed

+16
-9
lines changed

mm/internal.h

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -335,12 +335,15 @@ __vma_address(struct page *page, struct vm_area_struct *vma)
335335
static inline unsigned long
336336
vma_address(struct page *page, struct vm_area_struct *vma)
337337
{
338-
unsigned long address = __vma_address(page, vma);
338+
unsigned long start, end;
339+
340+
start = __vma_address(page, vma);
341+
end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
339342

340343
/* page should be within @vma mapping range */
341-
VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
344+
VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
342345

343-
return address;
346+
return max(start, vma->vm_start);
344347
}
345348

346349
#else /* !CONFIG_MMU */

mm/rmap.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1757,7 +1757,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
17571757
bool locked)
17581758
{
17591759
struct anon_vma *anon_vma;
1760-
pgoff_t pgoff;
1760+
pgoff_t pgoff_start, pgoff_end;
17611761
struct anon_vma_chain *avc;
17621762
int ret = SWAP_AGAIN;
17631763

@@ -1771,8 +1771,10 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
17711771
if (!anon_vma)
17721772
return ret;
17731773

1774-
pgoff = page_to_pgoff(page);
1775-
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1774+
pgoff_start = page_to_pgoff(page);
1775+
pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
1776+
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
1777+
pgoff_start, pgoff_end) {
17761778
struct vm_area_struct *vma = avc->vma;
17771779
unsigned long address = vma_address(page, vma);
17781780

@@ -1810,7 +1812,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
18101812
bool locked)
18111813
{
18121814
struct address_space *mapping = page_mapping(page);
1813-
pgoff_t pgoff;
1815+
pgoff_t pgoff_start, pgoff_end;
18141816
struct vm_area_struct *vma;
18151817
int ret = SWAP_AGAIN;
18161818

@@ -1825,10 +1827,12 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
18251827
if (!mapping)
18261828
return ret;
18271829

1828-
pgoff = page_to_pgoff(page);
1830+
pgoff_start = page_to_pgoff(page);
1831+
pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
18291832
if (!locked)
18301833
i_mmap_lock_read(mapping);
1831-
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1834+
vma_interval_tree_foreach(vma, &mapping->i_mmap,
1835+
pgoff_start, pgoff_end) {
18321836
unsigned long address = vma_address(page, vma);
18331837

18341838
cond_resched();

0 commit comments

Comments
 (0)