
Commit aae466b

JoonsooKim authored and torvalds committed
mm/swap: implement workingset detection for anonymous LRU
This patch implements workingset detection for anonymous LRU. All the
infrastructure is implemented by the previous patches so this patch just
activates the workingset detection by installing/retrieving the shadow
entry and adding refault calculation.

Signed-off-by: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 3852f67 commit aae466b
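The patch itself is small because, as the message says, the infrastructure landed in the previous patches; what gets wired up here is the shadow-entry round trip for anonymous pages. As a rough picture of the mechanism being activated, here is a minimal userspace sketch of the refault-distance idea; all names (nonresident_age, evict_page, refault_is_active) are illustrative, not the kernel's, and the real logic lives in mm/workingset.c:

/*
 * Illustrative sketch only: eviction records the current value of a
 * "nonresident age" counter as the page's shadow; on refault, the
 * distance between the current counter and the recorded one estimates
 * how many other pages were evicted in between.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long nonresident_age;	/* advances on every eviction */

static unsigned long evict_page(void)
{
	unsigned long shadow = nonresident_age;	/* packed into the shadow entry */

	nonresident_age++;			/* one more page pushed out */
	return shadow;
}

static bool refault_is_active(unsigned long shadow, unsigned long workingset_size)
{
	unsigned long refault_distance = nonresident_age - shadow;

	/* Only re-activate if the page could have stayed resident. */
	return refault_distance <= workingset_size;
}

int main(void)
{
	unsigned long shadow = evict_page();
	int i;

	for (i = 0; i < 8; i++)			/* eight other evictions meanwhile */
		evict_page();

	printf("activate on refault: %s\n",
	       refault_is_active(shadow, 16) ? "yes" : "no");
	return 0;
}

A small distance means the page was evicted only recently relative to the memory it competes for, so re-activating it is worthwhile; the hunks below install the shadow at eviction and retrieve it at swap-in.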


5 files changed: +43 -19 lines changed


include/linux/swap.h

Lines changed: 6 additions & 0 deletions
@@ -414,6 +414,7 @@ extern struct address_space *swapper_spaces[];
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *page);
+extern void *get_shadow_from_swap_cache(swp_entry_t entry);
 extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp);
 extern void __delete_from_swap_cache(struct page *page,
@@ -573,6 +574,11 @@ static inline int add_to_swap(struct page *page)
 	return 0;
 }
 
+static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+	return NULL;
+}
+
 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
					gfp_t gfp_mask, void **shadowp)
 {

mm/memory.c

Lines changed: 4 additions & 7 deletions
@@ -3098,6 +3098,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	int locked;
 	int exclusive = 0;
 	vm_fault_t ret = 0;
+	void *shadow = NULL;
 
 	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
 		goto out;
@@ -3149,13 +3150,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
					goto out_page;
				}
 
-				/*
-				 * XXX: Move to lru_cache_add() when it
-				 * supports new vs putback
-				 */
-				spin_lock_irq(&page_pgdat(page)->lru_lock);
-				lru_note_cost_page(page);
-				spin_unlock_irq(&page_pgdat(page)->lru_lock);
+				shadow = get_shadow_from_swap_cache(entry);
+				if (shadow)
+					workingset_refault(page, shadow);
 
				lru_cache_add(page);
				swap_readpage(page, true);
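Here the refault check covers the swap-in path that reads the page synchronously without going through the swap cache, so do_swap_page() has to look the shadow up explicitly with get_shadow_from_swap_cache(); the swap-cache path in mm/swap_state.c below instead receives the shadow through add_to_swap_cache()'s shadowp argument. A stand-in sketch of the two call patterns (the stubs below are invented for illustration and are not kernel code):

#include <stdio.h>

static void *slot = (void *)0x2b;	/* pretend a shadow was left here at eviction */

/* Pattern 1 (do_swap_page): look the shadow up without touching the slot. */
static void *lookup_shadow(void)
{
	return slot;			/* stands in for get_shadow_from_swap_cache() */
}

/* Pattern 2 (__read_swap_cache_async): inserting the new page hands back
 * whatever the slot previously held, via the shadowp out-parameter. */
static int insert_page(void **shadowp)
{
	*shadowp = slot;		/* stands in for add_to_swap_cache(..., &shadow) */
	slot = NULL;
	return 0;
}

static void note_refault(void *shadow)
{
	if (shadow)
		printf("workingset_refault() with shadow %p\n", shadow);
}

int main(void)
{
	void *shadow;

	note_refault(lookup_shadow());	/* synchronous, swap-cache-bypassing swap-in */

	if (!insert_page(&shadow))	/* swap-cache swap-in */
		note_refault(shadow);
	return 0;
}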

mm/swap_state.c

Lines changed: 18 additions & 5 deletions
@@ -106,6 +106,20 @@ void show_swap_cache_info(void)
 	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+	struct address_space *address_space = swap_address_space(entry);
+	pgoff_t idx = swp_offset(entry);
+	struct page *page;
+
+	page = find_get_entry(address_space, idx);
+	if (xa_is_value(page))
+		return page;
+	if (page)
+		put_page(page);
+	return NULL;
+}
+
 /*
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
@@ -406,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 {
 	struct swap_info_struct *si;
 	struct page *page;
+	void *shadow = NULL;
 
 	*new_page_allocated = false;
 
@@ -474,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	__SetPageSwapBacked(page);
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
-	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) {
+	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
 		put_swap_page(page, entry);
 		goto fail_unlock;
 	}
@@ -484,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		goto fail_unlock;
 	}
 
-	/* XXX: Move to lru_cache_add() when it supports new vs putback */
-	spin_lock_irq(&page_pgdat(page)->lru_lock);
-	lru_note_cost_page(page);
-	spin_unlock_irq(&page_pgdat(page)->lru_lock);
+	if (shadow)
+		workingset_refault(page, shadow);
 
 	/* Caller will initiate read into locked page */
 	SetPageWorkingset(page);
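get_shadow_from_swap_cache() works because the swap cache's XArray can hold either a real page pointer or a packed, non-pointer "value" entry (the shadow) in the same slot, and xa_is_value() tells them apart. A simplified userspace sketch of that encoding, assuming the convention that pointers are aligned so bit 0 can act as the value tag (the real helpers are xa_mk_value()/xa_is_value() in include/linux/xarray.h):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified model of XArray value entries, for illustration only. */
static void *mk_value(unsigned long v)
{
	return (void *)((v << 1) | 1);	/* bit 0 set marks a non-pointer entry */
}

static bool is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}

static unsigned long to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

int main(void)
{
	int page;				/* stands in for a struct page */
	void *slot_with_page = &page;		/* slot caching a real page */
	void *slot_with_shadow = mk_value(42);	/* slot left with a shadow entry */

	assert(!is_value(slot_with_page));
	assert(is_value(slot_with_shadow));
	printf("shadow value: %lu\n", to_value(slot_with_shadow));
	return 0;
}

That is why the function can return the entry directly when xa_is_value() is true, but must drop the reference with put_page() when the lookup found an actual page.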

mm/vmscan.c

Lines changed: 4 additions & 3 deletions
@@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 {
 	unsigned long flags;
 	int refcount;
+	void *shadow = NULL;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
@@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		mem_cgroup_swapout(page, swap);
-		__delete_from_swap_cache(page, swap, NULL);
+		if (reclaimed && !mapping_exiting(mapping))
+			shadow = workingset_eviction(page, target_memcg);
+		__delete_from_swap_cache(page, swap, shadow);
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 		put_swap_page(page, swap);
-		workingset_eviction(page, target_memcg);
 	} else {
 		void (*freepage)(struct page *);
-		void *shadow = NULL;
 
 		freepage = mapping->a_ops->freepage;
 		/*
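On the eviction side, __remove_mapping() now computes the shadow with workingset_eviction() and hands it to __delete_from_swap_cache(), so the shadow takes the departing page's place in the swap-cache slot rather than the slot being cleared, and only for pages reclaimed from a still-live mapping, hence the reclaimed && !mapping_exiting() check; this matches what the file-cache branch below it already did. A toy model of that hand-off, with a plain array standing in for the swap cache and invented helper names:

#include <stdio.h>

#define NSLOTS 4

static void *swap_cache[NSLOTS];	/* each slot: page pointer, shadow, or NULL */

/* Eviction: leave the shadow behind in the page's slot (toy stand-in for
 * __delete_from_swap_cache(page, swap, shadow)). */
static void evict_to_slot(unsigned int slot, void *shadow)
{
	swap_cache[slot] = shadow;
}

/* Swap-in: pick up whatever shadow the slot held and put the page back
 * (toy stand-in for get_shadow_from_swap_cache() plus add_to_swap_cache()). */
static void *refault_from_slot(unsigned int slot, void *page)
{
	void *shadow = swap_cache[slot];

	swap_cache[slot] = page;
	return shadow;
}

int main(void)
{
	int fake_page;
	void *shadow;

	evict_to_slot(1, (void *)0x2b);			/* reclaim stores a shadow */
	shadow = refault_from_slot(1, &fake_page);	/* later swap-in finds it  */
	printf("shadow seen at refault: %p\n", shadow);
	return 0;
}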

mm/workingset.c

Lines changed: 11 additions & 4 deletions
@@ -353,15 +353,22 @@ void workingset_refault(struct page *page, void *shadow)
 	/*
 	 * Compare the distance to the existing workingset size. We
 	 * don't activate pages that couldn't stay resident even if
-	 * all the memory was available to the page cache. Whether
-	 * cache can compete with anon or not depends on having swap.
+	 * all the memory was available to the workingset. Whether
+	 * workingset competition needs to consider anon or not depends
+	 * on having swap.
 	 */
 	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
-	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
+	if (!file) {
 		workingset_size += lruvec_page_state(eviction_lruvec,
-						     NR_INACTIVE_ANON);
+						     NR_INACTIVE_FILE);
+	}
+	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
 		workingset_size += lruvec_page_state(eviction_lruvec,
 						     NR_ACTIVE_ANON);
+		if (file) {
+			workingset_size += lruvec_page_state(eviction_lruvec,
+						     NR_INACTIVE_ANON);
+		}
 	}
 	if (refault_distance > workingset_size)
 		goto out;
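The comparison now treats anon and file refaults symmetrically: a refaulting page is only activated if its refault distance fits within the memory its workingset could realistically claim, and the other type's lists are counted only when they are actually in competition. Restated as a simplified C sketch (the lruvec counters are passed in as plain numbers; file is true when the refaulting page is a file page, have_swap mirrors mem_cgroup_get_nr_swap_pages() > 0):

#include <stdbool.h>
#include <stdio.h>

/* Simplified restatement of the new workingset_size logic above;
 * not the kernel function, just the same arithmetic. */
static unsigned long workingset_size(bool file, bool have_swap,
				     unsigned long active_file,
				     unsigned long inactive_file,
				     unsigned long active_anon,
				     unsigned long inactive_anon)
{
	unsigned long size = active_file;

	if (!file)		/* anon refaults also compete with inactive file */
		size += inactive_file;
	if (have_swap) {
		size += active_anon;
		if (file)	/* file refaults also compete with inactive anon */
			size += inactive_anon;
	}
	return size;
}

int main(void)
{
	/* Example: anon refault, swap available, 100/50/80/40 pages per list. */
	printf("workingset_size = %lu\n",
	       workingset_size(false, true, 100, 50, 80, 40));
	return 0;
}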
