Skip to content

Commit b2b29d6

Browse files
Matthew Wilcox authored and torvalds committed
mm: account PMD tables like PTE tables
We account the PTE level of the page tables to the process in order to make smarter OOM decisions and help diagnose why memory is fragmented. For these same reasons, we should account pages allocated for PMDs. With larger process address spaces and ASLR, the number of PMDs in use is higher than it used to be so the inaccuracy is starting to matter.

[[email protected]: arm: __pmd_free_tlb(): call page table destructor]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Cc: Abdul Haleem <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Max Filippov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Satheesh Rajendran <[email protected]>
Cc: Stafford Horne <[email protected]>
Cc: Naresh Kamboju <[email protected]>
Cc: Anders Roxell <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 34d1091 commit b2b29d6

File tree

2 files changed

+21
-4
lines changed

2 files changed

+21
-4
lines changed

arch/arm/include/asm/tlb.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
5959
#ifdef CONFIG_ARM_LPAE
6060
struct page *page = virt_to_page(pmdp);
6161

62+
pgtable_pmd_page_dtor(page);
6263
tlb_remove_table(tlb, page);
6364
#endif
6465
}

include/linux/mm.h

Lines changed: 20 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2254,15 +2254,15 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
22542254
return ptlock_ptr(pmd_to_page(pmd));
22552255
}
22562256

2257-
static inline bool pgtable_pmd_page_ctor(struct page *page)
2257+
static inline bool pmd_ptlock_init(struct page *page)
22582258
{
22592259
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
22602260
page->pmd_huge_pte = NULL;
22612261
#endif
22622262
return ptlock_init(page);
22632263
}
22642264

2265-
static inline void pgtable_pmd_page_dtor(struct page *page)
2265+
static inline void pmd_ptlock_free(struct page *page)
22662266
{
22672267
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
22682268
VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
@@ -2279,8 +2279,8 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
22792279
return &mm->page_table_lock;
22802280
}
22812281

2282-
static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
2283-
static inline void pgtable_pmd_page_dtor(struct page *page) {}
2282+
static inline bool pmd_ptlock_init(struct page *page) { return true; }
2283+
static inline void pmd_ptlock_free(struct page *page) {}
22842284

22852285
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
22862286

@@ -2293,6 +2293,22 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
22932293
return ptl;
22942294
}
22952295

2296+
static inline bool pgtable_pmd_page_ctor(struct page *page)
2297+
{
2298+
if (!pmd_ptlock_init(page))
2299+
return false;
2300+
__SetPageTable(page);
2301+
inc_zone_page_state(page, NR_PAGETABLE);
2302+
return true;
2303+
}
2304+
2305+
static inline void pgtable_pmd_page_dtor(struct page *page)
2306+
{
2307+
pmd_ptlock_free(page);
2308+
__ClearPageTable(page);
2309+
dec_zone_page_state(page, NR_PAGETABLE);
2310+
}
2311+
22962312
/*
22972313
* No scalability reason to split PUD locks yet, but follow the same pattern
22982314
* as the PMD locks to make it easier if we decide to. The VM should not be

0 commit comments

Comments (0)