
Commit 3852f67

JoonsooKim authored and torvalds committed
mm/swapcache: support to handle the shadow entries
Workingset detection for anonymous pages will be implemented in the
following patch, and it requires storing shadow entries in the swapcache.
This patch implements the infrastructure for storing a shadow entry in
the swapcache.

Signed-off-by: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 170b04b commit 3852f67
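
For orientation before the diffs: every caller touched by this commit passes NULL for the new shadow arguments, so behavior is unchanged; the point is the plumbing. A sketch of how the follow-up workingset patch might use the shadowp out-parameter inside __read_swap_cache_async() — the workingset_refault() call is an assumption modeled on the existing file-cache refault path, not something this commit adds:

	/* Hypothetical follow-up usage -- not in this commit. */
	void *shadow = NULL;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}

	/*
	 * A non-NULL shadow means this swap slot was in the swap cache
	 * before and records when it was evicted; workingset detection
	 * would classify this fault as a refault based on it.
	 */
	if (shadow)
		workingset_refault(page, shadow);	/* assumed follow-up hook */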

File tree

5 files changed: +69 −12 lines


include/linux/swap.h

Lines changed: 13 additions & 4 deletions
@@ -414,9 +414,13 @@ extern struct address_space *swapper_spaces[];
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *page);
-extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
-extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
+extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
+			gfp_t gfp, void **shadowp);
+extern void __delete_from_swap_cache(struct page *page,
+			swp_entry_t entry, void *shadow);
 extern void delete_from_swap_cache(struct page *);
+extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
+				unsigned long end);
 extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t entry,
@@ -570,20 +574,25 @@ static inline int add_to_swap(struct page *page)
 }
 
 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
-					gfp_t gfp_mask)
+					gfp_t gfp_mask, void **shadowp)
 {
 	return -1;
 }
 
 static inline void __delete_from_swap_cache(struct page *page,
-					swp_entry_t entry)
+					swp_entry_t entry, void *shadow)
 {
 }
 
 static inline void delete_from_swap_cache(struct page *page)
 {
 }
 
+static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
+				unsigned long end)
+{
+}
+
 static inline int page_swapcount(struct page *page)
 {
 	return 0;
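
A note on the shadow representation these prototypes traffic in: the void *shadow values are XArray "value entries", tagged pointers that xa_is_value() distinguishes from real struct page pointers. A minimal, self-contained illustration of that convention (the demo function and the payload 42 are invented for illustration):

	#include <linux/xarray.h>
	#include <linux/printk.h>

	/* Sketch: store a shadow (value) entry and tell it apart from a page. */
	static void shadow_demo(struct xarray *xa, unsigned long index)
	{
		void *old;

		/* xa_mk_value() tags a small integer so it cannot be a pointer. */
		xa_store(xa, index, xa_mk_value(42), GFP_KERNEL);

		old = xa_load(xa, index);
		if (xa_is_value(old))	/* true for shadows, false for struct page * */
			pr_info("shadow payload: %lu\n", xa_to_value(old));
	}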

mm/shmem.c

Lines changed: 2 additions & 1 deletion
@@ -1434,7 +1434,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	list_add(&info->swaplist, &shmem_swaplist);
 
 	if (add_to_swap_cache(page, swap,
-			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN) == 0) {
+			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
+			NULL) == 0) {
 		spin_lock_irq(&info->lock);
 		shmem_recalc_inode(inode);
 		info->swapped++;

mm/swap_state.c

Lines changed: 51 additions & 6 deletions
@@ -110,12 +110,14 @@ void show_swap_cache_info(void)
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
+int add_to_swap_cache(struct page *page, swp_entry_t entry,
+			gfp_t gfp, void **shadowp)
 {
 	struct address_space *address_space = swap_address_space(entry);
 	pgoff_t idx = swp_offset(entry);
 	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
 	unsigned long i, nr = hpage_nr_pages(page);
+	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -125,16 +127,25 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 	SetPageSwapCache(page);
 
 	do {
+		unsigned long nr_shadows = 0;
+
 		xas_lock_irq(&xas);
 		xas_create_range(&xas);
 		if (xas_error(&xas))
 			goto unlock;
 		for (i = 0; i < nr; i++) {
 			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+			old = xas_load(&xas);
+			if (xa_is_value(old)) {
+				nr_shadows++;
+				if (shadowp)
+					*shadowp = old;
+			}
 			set_page_private(page + i, entry.val + i);
 			xas_store(&xas, page);
 			xas_next(&xas);
 		}
+		address_space->nrexceptional -= nr_shadows;
 		address_space->nrpages += nr;
 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
 		ADD_CACHE_INFO(add_total, nr);
@@ -154,7 +165,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
  * This must be called only on pages that have
  * been verified to be in the swap cache.
  */
-void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
+void __delete_from_swap_cache(struct page *page,
+			swp_entry_t entry, void *shadow)
 {
 	struct address_space *address_space = swap_address_space(entry);
 	int i, nr = hpage_nr_pages(page);
@@ -166,12 +178,14 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
 	VM_BUG_ON_PAGE(PageWriteback(page), page);
 
 	for (i = 0; i < nr; i++) {
-		void *entry = xas_store(&xas, NULL);
+		void *entry = xas_store(&xas, shadow);
 		VM_BUG_ON_PAGE(entry != page, entry);
 		set_page_private(page + i, 0);
 		xas_next(&xas);
 	}
 	ClearPageSwapCache(page);
+	if (shadow)
+		address_space->nrexceptional += nr;
 	address_space->nrpages -= nr;
 	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
 	ADD_CACHE_INFO(del_total, nr);
@@ -208,7 +222,7 @@ int add_to_swap(struct page *page)
 	 * Add it to the swap cache.
 	 */
 	err = add_to_swap_cache(page, entry,
-			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
 	if (err)
 		/*
 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -246,13 +260,44 @@ void delete_from_swap_cache(struct page *page)
 	struct address_space *address_space = swap_address_space(entry);
 
 	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(page, entry);
+	__delete_from_swap_cache(page, entry, NULL);
 	xa_unlock_irq(&address_space->i_pages);
 
 	put_swap_page(page, entry);
 	page_ref_sub(page, hpage_nr_pages(page));
 }
 
+void clear_shadow_from_swap_cache(int type, unsigned long begin,
+				unsigned long end)
+{
+	unsigned long curr = begin;
+	void *old;
+
+	for (;;) {
+		unsigned long nr_shadows = 0;
+		swp_entry_t entry = swp_entry(type, curr);
+		struct address_space *address_space = swap_address_space(entry);
+		XA_STATE(xas, &address_space->i_pages, curr);
+
+		xa_lock_irq(&address_space->i_pages);
+		xas_for_each(&xas, old, end) {
+			if (!xa_is_value(old))
+				continue;
+			xas_store(&xas, NULL);
+			nr_shadows++;
+		}
+		address_space->nrexceptional -= nr_shadows;
+		xa_unlock_irq(&address_space->i_pages);
+
+		/* search the next swapcache until we meet end */
+		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
+		curr++;
+		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
+		if (curr > end)
+			break;
+	}
+}
+
 /*
  * If we are the only user, then try to free up the swap cache.
  *
@@ -429,7 +474,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	__SetPageSwapBacked(page);
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
-	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) {
+	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) {
 		put_swap_page(page, entry);
 		goto fail_unlock;
 	}
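
Why the shift/increment/shift sequence at the bottom of clear_shadow_from_swap_cache(): swap cache address spaces are sharded, one struct address_space per 1 << SWAP_ADDRESS_SPACE_SHIFT swap slots, so a single XArray walk cannot cross a shard boundary and the loop must restart in the next shard. A standalone sketch of just that rounding, assuming SWAP_ADDRESS_SPACE_SHIFT is 14 as defined in include/linux/swap.h:

	#include <stdio.h>

	#define SWAP_ADDRESS_SPACE_SHIFT 14	/* one address_space per 16384 slots */

	/* Round curr up to the first slot of the next swap address space. */
	static unsigned long next_swap_shard(unsigned long curr)
	{
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		return curr << SWAP_ADDRESS_SPACE_SHIFT;
	}

	int main(void)
	{
		/* Slot 20000 lives in shard 1 (16384..32767), so the
		 * next walk starts at 32768; prints "32768". */
		printf("%lu\n", next_swap_shard(20000));
		return 0;
	}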

mm/swapfile.c

Lines changed: 2 additions & 0 deletions
@@ -696,6 +696,7 @@ static void add_to_avail_list(struct swap_info_struct *p)
 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 			    unsigned int nr_entries)
 {
+	unsigned long begin = offset;
 	unsigned long end = offset + nr_entries - 1;
 	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
 
@@ -721,6 +722,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 		swap_slot_free_notify(si->bdev, offset);
 		offset++;
 	}
+	clear_shadow_from_swap_cache(si->type, begin, end);
 }
 
 static void set_cluster_next(struct swap_info_struct *si, unsigned long next)

mm/vmscan.c

Lines changed: 1 addition & 1 deletion
@@ -896,7 +896,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		mem_cgroup_swapout(page, swap);
-		__delete_from_swap_cache(page, swap);
+		__delete_from_swap_cache(page, swap, NULL);
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 		put_swap_page(page, swap);
 		workingset_eviction(page, target_memcg);
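
Reclaim still passes NULL here, so eviction information is discarded for anonymous pages; note that the workingset_eviction() return value on the last context line is likewise unused. A guess at the shape of the follow-up that threads it through (hypothetical, not part of this commit):

	/* Hypothetical follow-up shape -- not in this commit. */
	void *shadow = workingset_eviction(page, target_memcg);

	__delete_from_swap_cache(page, swap, shadow);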
