Skip to content

Commit 40f2bbf

Browse files
davidhildenbrand authored and akpm00 committed
mm/rmap: drop "compound" parameter from page_add_new_anon_rmap()
New anonymous pages are always mapped natively: only THP/khugepaged code maps a new compound anonymous page and passes "true". Otherwise, we're just dealing with simple, non-compound pages. Let's give the interface clearer semantics and document these. Remove the PageTransCompound() sanity check from page_add_new_anon_rmap(). Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: David Hildenbrand <[email protected]> Acked-by: Vlastimil Babka <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: David Rientjes <[email protected]> Cc: Don Dutile <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Jan Kara <[email protected]> Cc: Jann Horn <[email protected]> Cc: Jason Gunthorpe <[email protected]> Cc: John Hubbard <[email protected]> Cc: Khalid Aziz <[email protected]> Cc: "Kirill A. Shutemov" <[email protected]> Cc: Liang Zhang <[email protected]> Cc: "Matthew Wilcox (Oracle)" <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Mike Kravetz <[email protected]> Cc: Mike Rapoport <[email protected]> Cc: Nadav Amit <[email protected]> Cc: Oded Gabbay <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: Pedro Demarchi Gomes <[email protected]> Cc: Peter Xu <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Roman Gushchin <[email protected]> Cc: Shakeel Butt <[email protected]> Cc: Yang Shi <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 28c5209 commit 40f2bbf

File tree

9 files changed

+19
-17
lines changed

9 files changed

+19
-17
lines changed

include/linux/rmap.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -185,11 +185,12 @@ void page_move_anon_rmap(struct page *, struct vm_area_struct *);
185185
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
186186
unsigned long address, rmap_t flags);
187187
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
188-
unsigned long address, bool compound);
188+
unsigned long address);
189189
void page_add_file_rmap(struct page *, struct vm_area_struct *,
190190
bool compound);
191191
void page_remove_rmap(struct page *, struct vm_area_struct *,
192192
bool compound);
193+
193194
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
194195
unsigned long address, rmap_t flags);
195196
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,

kernel/events/uprobes.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
180180

181181
if (new_page) {
182182
get_page(new_page);
183-
page_add_new_anon_rmap(new_page, vma, addr, false);
183+
page_add_new_anon_rmap(new_page, vma, addr);
184184
lru_cache_add_inactive_or_unevictable(new_page, vma);
185185
} else
186186
/* no new page, just dec_mm_counter for old_page */

mm/huge_memory.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -647,7 +647,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
647647

648648
entry = mk_huge_pmd(page, vma->vm_page_prot);
649649
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
650-
page_add_new_anon_rmap(page, vma, haddr, true);
650+
page_add_new_anon_rmap(page, vma, haddr);
651651
lru_cache_add_inactive_or_unevictable(page, vma);
652652
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
653653
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);

mm/khugepaged.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1183,7 +1183,7 @@ static void collapse_huge_page(struct mm_struct *mm,
11831183

11841184
spin_lock(pmd_ptl);
11851185
BUG_ON(!pmd_none(*pmd));
1186-
page_add_new_anon_rmap(new_page, vma, address, true);
1186+
page_add_new_anon_rmap(new_page, vma, address);
11871187
lru_cache_add_inactive_or_unevictable(new_page, vma);
11881188
pgtable_trans_huge_deposit(mm, pmd, pgtable);
11891189
set_pmd_at(mm, address, pmd, _pmd);

mm/memory.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -893,7 +893,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
893893
*prealloc = NULL;
894894
copy_user_highpage(new_page, page, addr, src_vma);
895895
__SetPageUptodate(new_page);
896-
page_add_new_anon_rmap(new_page, dst_vma, addr, false);
896+
page_add_new_anon_rmap(new_page, dst_vma, addr);
897897
lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
898898
rss[mm_counter(new_page)]++;
899899

@@ -3058,7 +3058,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
30583058
* some TLBs while the old PTE remains in others.
30593059
*/
30603060
ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3061-
page_add_new_anon_rmap(new_page, vma, vmf->address, false);
3061+
page_add_new_anon_rmap(new_page, vma, vmf->address);
30623062
lru_cache_add_inactive_or_unevictable(new_page, vma);
30633063
/*
30643064
* We call the notify macro here because, when using secondary
@@ -3702,7 +3702,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
37023702

37033703
/* ksm created a completely new copy */
37043704
if (unlikely(page != swapcache && swapcache)) {
3705-
page_add_new_anon_rmap(page, vma, vmf->address, false);
3705+
page_add_new_anon_rmap(page, vma, vmf->address);
37063706
lru_cache_add_inactive_or_unevictable(page, vma);
37073707
} else {
37083708
page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
@@ -3852,7 +3852,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
38523852
}
38533853

38543854
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3855-
page_add_new_anon_rmap(page, vma, vmf->address, false);
3855+
page_add_new_anon_rmap(page, vma, vmf->address);
38563856
lru_cache_add_inactive_or_unevictable(page, vma);
38573857
setpte:
38583858
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
@@ -4039,7 +4039,7 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
40394039
/* copy-on-write page */
40404040
if (write && !(vma->vm_flags & VM_SHARED)) {
40414041
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4042-
page_add_new_anon_rmap(page, vma, addr, false);
4042+
page_add_new_anon_rmap(page, vma, addr);
40434043
lru_cache_add_inactive_or_unevictable(page, vma);
40444044
} else {
40454045
inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));

mm/migrate_device.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -610,7 +610,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
610610
goto unlock_abort;
611611

612612
inc_mm_counter(mm, MM_ANONPAGES);
613-
page_add_new_anon_rmap(page, vma, addr, false);
613+
page_add_new_anon_rmap(page, vma, addr);
614614
if (!is_zone_device_page(page))
615615
lru_cache_add_inactive_or_unevictable(page, vma);
616616
get_page(page);

mm/rmap.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1226,19 +1226,22 @@ void page_add_anon_rmap(struct page *page,
12261226
}
12271227

12281228
/**
1229-
* page_add_new_anon_rmap - add pte mapping to a new anonymous page
1229+
* page_add_new_anon_rmap - add mapping to a new anonymous page
12301230
* @page: the page to add the mapping to
12311231
* @vma: the vm area in which the mapping is added
12321232
* @address: the user virtual address mapped
1233-
* @compound: charge the page as compound or small page
1233+
*
1234+
* If it's a compound page, it is accounted as a compound page. As the page
1235+
* is new, it's assume to get mapped exclusively by a single process.
12341236
*
12351237
* Same as page_add_anon_rmap but must only be called on *new* pages.
12361238
* This means the inc-and-test can be bypassed.
12371239
* Page does not have to be locked.
12381240
*/
12391241
void page_add_new_anon_rmap(struct page *page,
1240-
struct vm_area_struct *vma, unsigned long address, bool compound)
1242+
struct vm_area_struct *vma, unsigned long address)
12411243
{
1244+
const bool compound = PageCompound(page);
12421245
int nr = compound ? thp_nr_pages(page) : 1;
12431246

12441247
VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
@@ -1251,8 +1254,6 @@ void page_add_new_anon_rmap(struct page *page,
12511254

12521255
__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
12531256
} else {
1254-
/* Anon THP always mapped first with PMD */
1255-
VM_BUG_ON_PAGE(PageTransCompound(page), page);
12561257
/* increment count (starts at -1) */
12571258
atomic_set(&page->_mapcount, 0);
12581259
}

mm/swapfile.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1802,7 +1802,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
18021802
if (page == swapcache) {
18031803
page_add_anon_rmap(page, vma, addr, RMAP_NONE);
18041804
} else { /* ksm created a completely new copy */
1805-
page_add_new_anon_rmap(page, vma, addr, false);
1805+
page_add_new_anon_rmap(page, vma, addr);
18061806
lru_cache_add_inactive_or_unevictable(page, vma);
18071807
}
18081808
set_pte_at(vma->vm_mm, addr, pte,

mm/userfaultfd.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
104104
lru_cache_add(page);
105105
page_add_file_rmap(page, dst_vma, false);
106106
} else {
107-
page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
107+
page_add_new_anon_rmap(page, dst_vma, dst_addr);
108108
lru_cache_add_inactive_or_unevictable(page, dst_vma);
109109
}
110110

0 commit comments

Comments (0)