
Commit 018ee47

yuzhaogoogle authored and akpm00 committed
mm: multi-gen LRU: exploit locality in rmap
Searching the rmap for PTEs mapping each page on an LRU list (to test and
clear the accessed bit) can be expensive because pages from different VMAs
(PA space) are not cache friendly to the rmap (VA space). For workloads
mostly using mapped pages, searching the rmap can incur the highest CPU
cost in the reclaim path.

This patch exploits spatial locality to reduce the trips into the rmap.
When shrink_page_list() walks the rmap and finds a young PTE, a new
function lru_gen_look_around() scans at most BITS_PER_LONG-1 adjacent
PTEs. On finding another young PTE, it clears the accessed bit and updates
the gen counter of the page mapped by this PTE to (max_seq % MAX_NR_GENS) + 1.

Server benchmark results:
  Single workload:
    fio (buffered I/O): no change

  Single workload:
    memcached (anon): +[3, 5]%
                Ops/sec      KB/sec
      patch1-6: 1106168.46   43025.04
      patch1-7: 1147696.57   44640.29

  Configurations: no change

Client benchmark results:
  kswapd profiles:
    patch1-6
      39.03%  lzo1x_1_do_compress (real work)
      18.47%  page_vma_mapped_walk (overhead)
       6.74%  _raw_spin_unlock_irq
       3.97%  do_raw_spin_lock
       2.49%  ptep_clear_flush
       2.48%  anon_vma_interval_tree_iter_first
       1.92%  folio_referenced_one
       1.88%  __zram_bvec_write
       1.48%  memmove
       1.31%  vma_interval_tree_iter_next

    patch1-7
      48.16%  lzo1x_1_do_compress (real work)
       8.20%  page_vma_mapped_walk (overhead)
       7.06%  _raw_spin_unlock_irq
       2.92%  ptep_clear_flush
       2.53%  __zram_bvec_write
       2.11%  do_raw_spin_lock
       2.02%  memmove
       1.93%  lru_gen_look_around
       1.56%  free_unref_page_list
       1.40%  memset

  Configurations: no change

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yu Zhao <[email protected]>
Acked-by: Barry Song <[email protected]>
Acked-by: Brian Geffon <[email protected]>
Acked-by: Jan Alexander Steffens (heftig) <[email protected]>
Acked-by: Oleksandr Natalenko <[email protected]>
Acked-by: Steven Barrett <[email protected]>
Acked-by: Suleiman Souhlal <[email protected]>
Tested-by: Daniel Byrne <[email protected]>
Tested-by: Donald Carr <[email protected]>
Tested-by: Holger Hoffstätte <[email protected]>
Tested-by: Konstantin Kharlamov <[email protected]>
Tested-by: Shuang Zhai <[email protected]>
Tested-by: Sofia Trinh <[email protected]>
Tested-by: Vaibhav Jain <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Michael Larabel <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Qi Zheng <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
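A small user-space sketch of the scan-window clamping that lru_gen_look_around() performs (see the mm/vmscan.c hunk below). The constants here are illustrative stand-ins rather than the kernel's definitions: the window never crosses a PMD or VMA boundary and is then trimmed to at most MIN_LRU_BATCH pages around the faulting address.

/*
 * Stand-alone sketch of the window clamping in lru_gen_look_around().
 * PAGE_SIZE, PMD_SIZE and MIN_LRU_BATCH are illustrative stand-ins.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(512 * PAGE_SIZE)	/* assuming 2 MiB PMDs, as on x86-64 */
#define PMD_MASK	(~(PMD_SIZE - 1))
#define MIN_LRU_BATCH	64UL			/* BITS_PER_LONG on a 64-bit build */

static void clamp_window(unsigned long addr, unsigned long vm_start,
			 unsigned long vm_end, unsigned long *start,
			 unsigned long *end)
{
	/* stay inside both the PMD and the VMA */
	*start = addr & PMD_MASK;
	if (*start < vm_start)
		*start = vm_start;
	*end = (addr | ~PMD_MASK) + 1;
	if (*end > vm_end)
		*end = vm_end;

	/* trim to at most MIN_LRU_BATCH pages around addr */
	if (*end - *start > MIN_LRU_BATCH * PAGE_SIZE) {
		if (addr - *start < MIN_LRU_BATCH * PAGE_SIZE / 2)
			*end = *start + MIN_LRU_BATCH * PAGE_SIZE;
		else if (*end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
			*start = *end - MIN_LRU_BATCH * PAGE_SIZE;
		else {
			*start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
			*end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
		}
	}
}

int main(void)
{
	unsigned long vm_start = 0x400000000UL;		/* PMD-aligned VMA start */
	unsigned long vm_end = vm_start + 512 * PAGE_SIZE;
	unsigned long start, end;

	/* a young PTE 300 pages into the VMA: the window shrinks to 64 pages */
	clamp_window(vm_start + 300 * PAGE_SIZE, vm_start, vm_end, &start, &end);
	assert((end - start) / PAGE_SIZE == MIN_LRU_BATCH);
	printf("scan %lu pages\n", (end - start) / PAGE_SIZE);
	return 0;
}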
1 parent ac35a49 commit 018ee47

File tree: 8 files changed (+236, −2 lines)

include/linux/memcontrol.h

Lines changed: 31 additions & 0 deletions
@@ -445,6 +445,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
  * - LRU isolation
  * - lock_page_memcg()
  * - exclusive reference
+ * - mem_cgroup_trylock_pages()
  *
  * For a kmem folio a caller should hold an rcu read lock to protect memcg
  * associated with a kmem folio from being released.
@@ -506,6 +507,7 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
  * - LRU isolation
  * - lock_page_memcg()
  * - exclusive reference
+ * - mem_cgroup_trylock_pages()
  *
  * For a kmem page a caller should hold an rcu read lock to protect memcg
  * associated with a kmem page from being released.
@@ -960,6 +962,23 @@ void unlock_page_memcg(struct page *page);
 
 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
 
+/* try to stablize folio_memcg() for all the pages in a memcg */
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+	rcu_read_lock();
+
+	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
+		return true;
+
+	rcu_read_unlock();
+	return false;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+	rcu_read_unlock();
+}
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void mod_memcg_state(struct mem_cgroup *memcg,
 				   int idx, int val)
@@ -1434,6 +1453,18 @@ static inline void folio_memcg_unlock(struct folio *folio)
 {
 }
 
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+	/* to match folio_memcg_rcu() */
+	rcu_read_lock();
+	return true;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+	rcu_read_unlock();
+}
+
 static inline void mem_cgroup_handle_over_high(void)
 {
 }
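The new pair is exercised by lru_gen_look_around() in mm/vmscan.c later in this commit. A rough sketch of the intended caller pattern (do_something_with_stable_memcg() is a hypothetical placeholder, not a kernel function):

/*
 * Rough sketch of the caller pattern: on success the pair pins
 * folio_memcg() for every page in the memcg (the trylock fails while the
 * memcg is moving accounts), and the unlock drops the RCU read lock taken
 * by the trylock.
 */
static void example_caller(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_trylock_pages(memcg))
		return;	/* memcg is moving accounts; bail out */

	do_something_with_stable_memcg(memcg);	/* hypothetical work */

	mem_cgroup_unlock_pages();
}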

include/linux/mm.h

Lines changed: 5 additions & 0 deletions
@@ -1490,6 +1490,11 @@ static inline unsigned long folio_pfn(struct folio *folio)
 	return page_to_pfn(&folio->page);
 }
 
+static inline struct folio *pfn_folio(unsigned long pfn)
+{
+	return page_folio(pfn_to_page(pfn));
+}
+
 static inline atomic_t *folio_pincount_ptr(struct folio *folio)
 {
 	return &folio_page(folio, 1)->compound_pincount;

include/linux/mmzone.h

Lines changed: 6 additions & 0 deletions
@@ -375,6 +375,7 @@ enum lruvec_flags {
 #ifndef __GENERATING_BOUNDS_H
 
 struct lruvec;
+struct page_vma_mapped_walk;
 
 #define LRU_GEN_MASK		((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
 #define LRU_REFS_MASK		((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
@@ -430,6 +431,7 @@ struct lru_gen_struct {
 };
 
 void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
 
 #ifdef CONFIG_MEMCG
 void lru_gen_init_memcg(struct mem_cgroup *memcg);
@@ -442,6 +444,10 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
 {
 }
 
+static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+}
+
 #ifdef CONFIG_MEMCG
 static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
 {

mm/internal.h

Lines changed: 1 addition & 0 deletions
@@ -83,6 +83,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
 bool __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
+void folio_activate(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);

mm/memcontrol.c

Lines changed: 1 addition & 0 deletions
@@ -2789,6 +2789,7 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 	 * - LRU isolation
 	 * - lock_page_memcg()
 	 * - exclusive reference
+	 * - mem_cgroup_trylock_pages()
 	 */
 	folio->memcg_data = (unsigned long)memcg;
 }

mm/rmap.c

Lines changed: 6 additions & 0 deletions
@@ -825,6 +825,12 @@ static bool folio_referenced_one(struct folio *folio,
 		}
 
 		if (pvmw.pte) {
+			if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
+			    !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
+				lru_gen_look_around(&pvmw);
+				referenced++;
+			}
+
 			if (ptep_clear_flush_young_notify(vma, address,
 						pvmw.pte)) {
 				/*

mm/swap.c

Lines changed: 2 additions & 2 deletions
@@ -366,7 +366,7 @@ static void folio_activate_drain(int cpu)
 	folio_batch_move_lru(fbatch, folio_activate_fn);
 }
 
-static void folio_activate(struct folio *folio)
+void folio_activate(struct folio *folio)
 {
 	if (folio_test_lru(folio) && !folio_test_active(folio) &&
 	    !folio_test_unevictable(folio)) {
@@ -385,7 +385,7 @@ static inline void folio_activate_drain(int cpu)
 {
 }
 
-static void folio_activate(struct folio *folio)
+void folio_activate(struct folio *folio)
 {
 	struct lruvec *lruvec;
 
mm/vmscan.c

Lines changed: 184 additions & 0 deletions
@@ -1635,6 +1635,11 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 		if (!sc->may_unmap && folio_mapped(folio))
 			goto keep_locked;
 
+		/* folio_update_gen() tried to promote this page? */
+		if (lru_gen_enabled() && !ignore_references &&
+		    folio_mapped(folio) && folio_test_referenced(folio))
+			goto keep_locked;
+
 		/*
 		 * The number of dirty pages determines if a node is marked
 		 * reclaim_congested. kswapd will stall and start writing
@@ -3219,6 +3224,29 @@ static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
  *                          the aging
  ******************************************************************************/
 
+/* promote pages accessed through page tables */
+static int folio_update_gen(struct folio *folio, int gen)
+{
+	unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+
+	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+	VM_WARN_ON_ONCE(!rcu_read_lock_held());
+
+	do {
+		/* lru_gen_del_folio() has isolated this page? */
+		if (!(old_flags & LRU_GEN_MASK)) {
+			/* for shrink_page_list() */
+			new_flags = old_flags | BIT(PG_referenced);
+			continue;
+		}
+
+		new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
+		new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
+	} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+
+	return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
+
 /* protect pages accessed multiple times through file descriptors */
 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
 {
@@ -3230,6 +3258,11 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
 	VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
 
 	do {
+		new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+		/* folio_update_gen() has promoted this page? */
+		if (new_gen >= 0 && new_gen != old_gen)
+			return new_gen;
+
 		new_gen = (old_gen + 1) % MAX_NR_GENS;
 
 		new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
@@ -3244,6 +3277,43 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
 	return new_gen;
 }
 
+static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
+{
+	unsigned long pfn = pte_pfn(pte);
+
+	VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
+
+	if (!pte_present(pte) || is_zero_pfn(pfn))
+		return -1;
+
+	if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
+		return -1;
+
+	if (WARN_ON_ONCE(!pfn_valid(pfn)))
+		return -1;
+
+	return pfn;
+}
+
+static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
+				   struct pglist_data *pgdat)
+{
+	struct folio *folio;
+
+	/* try to avoid unnecessary memory loads */
+	if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
+		return NULL;
+
+	folio = pfn_folio(pfn);
+	if (folio_nid(folio) != pgdat->node_id)
+		return NULL;
+
+	if (folio_memcg_rcu(folio) != memcg)
+		return NULL;
+
+	return folio;
+}
+
 static void inc_min_seq(struct lruvec *lruvec, int type)
 {
 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
@@ -3443,6 +3513,114 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
 }
 
+/*
+ * This function exploits spatial locality when shrink_page_list() walks the
+ * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages.
+ */
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+	int i;
+	pte_t *pte;
+	unsigned long start;
+	unsigned long end;
+	unsigned long addr;
+	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
+	struct folio *folio = pfn_folio(pvmw->pfn);
+	struct mem_cgroup *memcg = folio_memcg(folio);
+	struct pglist_data *pgdat = folio_pgdat(folio);
+	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
+	DEFINE_MAX_SEQ(lruvec);
+	int old_gen, new_gen = lru_gen_from_seq(max_seq);
+
+	lockdep_assert_held(pvmw->ptl);
+	VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
+
+	if (spin_is_contended(pvmw->ptl))
+		return;
+
+	start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
+	end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+
+	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
+		if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
+			end = start + MIN_LRU_BATCH * PAGE_SIZE;
+		else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
+			start = end - MIN_LRU_BATCH * PAGE_SIZE;
+		else {
+			start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
+			end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
+		}
+	}
+
+	pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
+
+	rcu_read_lock();
+	arch_enter_lazy_mmu_mode();
+
+	for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
+		unsigned long pfn;
+
+		pfn = get_pte_pfn(pte[i], pvmw->vma, addr);
+		if (pfn == -1)
+			continue;
+
+		if (!pte_young(pte[i]))
+			continue;
+
+		folio = get_pfn_folio(pfn, memcg, pgdat);
+		if (!folio)
+			continue;
+
+		if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
+			VM_WARN_ON_ONCE(true);
+
+		if (pte_dirty(pte[i]) && !folio_test_dirty(folio) &&
+		    !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+		      !folio_test_swapcache(folio)))
+			folio_mark_dirty(folio);
+
+		old_gen = folio_lru_gen(folio);
+		if (old_gen < 0)
+			folio_set_referenced(folio);
+		else if (old_gen != new_gen)
+			__set_bit(i, bitmap);
+	}
+
+	arch_leave_lazy_mmu_mode();
+	rcu_read_unlock();
+
+	if (bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
+		for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
+			folio = pfn_folio(pte_pfn(pte[i]));
+			folio_activate(folio);
+		}
+		return;
+	}
+
+	/* folio_update_gen() requires stable folio_memcg() */
+	if (!mem_cgroup_trylock_pages(memcg))
+		return;
+
+	spin_lock_irq(&lruvec->lru_lock);
+	new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
+
+	for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
+		folio = pfn_folio(pte_pfn(pte[i]));
+		if (folio_memcg_rcu(folio) != memcg)
+			continue;
+
+		old_gen = folio_update_gen(folio, new_gen);
+		if (old_gen < 0 || old_gen == new_gen)
+			continue;
+
+		lru_gen_update_size(lruvec, folio, old_gen, new_gen);
+	}
+
+	spin_unlock_irq(&lruvec->lru_lock);
+
+	mem_cgroup_unlock_pages();
+}
+
 /******************************************************************************
  *                          the eviction
  ******************************************************************************/
@@ -3479,6 +3657,12 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
 		return true;
 	}
 
+	/* promoted */
+	if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
+		list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+		return true;
+	}
+
 	/* protected */
 	if (tier > tier_idx) {
 		int hist = lru_hist_from_seq(lrugen->min_seq[type]);
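A small user-space sketch of the generation encoding that folio_update_gen() and folio_inc_gen() above rely on; the field width and offset here are illustrative, not the kernel's. The gen is stored in folio->flags with a +1 bias, so an all-zero field means the folio is not on a multi-gen LRU list, which is why the helpers subtract 1 on the way out and treat negative values as "not on a list".

/*
 * Stand-alone sketch of the gen encoding: pack with a +1 bias, unpack
 * back to the gen, with -1 meaning "not on a multi-gen LRU list".
 * LRU_GEN_WIDTH, LRU_GEN_PGOFF and MAX_NR_GENS are illustrative.
 */
#include <assert.h>

#define LRU_GEN_WIDTH	3
#define LRU_GEN_PGOFF	20
#define LRU_GEN_MASK	(((1UL << LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define MAX_NR_GENS	4UL

/* seq -> gen, as in lru_gen_from_seq() */
static unsigned long gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

/* store gen with the +1 bias, as folio_update_gen() does */
static unsigned long pack_gen(unsigned long flags, unsigned long gen)
{
	flags &= ~LRU_GEN_MASK;
	return flags | ((gen + 1UL) << LRU_GEN_PGOFF);
}

/* recover the stored gen; -1 means "not on a list" */
static long unpack_gen(unsigned long flags)
{
	return (long)((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

int main(void)
{
	unsigned long max_seq = 7, flags = 0;

	assert(unpack_gen(flags) == -1);			/* not on a list */
	flags = pack_gen(flags, gen_from_seq(max_seq));		/* field becomes (max_seq % MAX_NR_GENS) + 1 */
	assert(unpack_gen(flags) == (long)gen_from_seq(max_seq));
	return 0;
}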
