
Commit b9c91c4

yosrym93 authored and akpm00 committed
mm: zswap: support exclusive loads
Commit 71024cb ("frontswap: remove frontswap_tmem_exclusive_gets") removed support for exclusive loads from frontswap as it was not used. Bring back exclusive loads support to frontswap by adding an "exclusive" output parameter to frontswap_ops->load. On the zswap side, add a module parameter to enable/disable exclusive loads, and a config option to control the boot default value. Refactor zswap entry invalidation in zswap_frontswap_invalidate_page() into zswap_invalidate_entry() to reuse it in zswap_frontswap_load() if exclusive loads are enabled. With exclusive loads, we avoid having two copies of the same page in memory (compressed & uncompressed) after faulting it in from zswap. On the other hand, if the page is to be reclaimed again without being dirtied, it will be re-compressed. Compression is not usually slow, and a page that was just faulted in is less likely to be reclaimed again soon. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Yosry Ahmed <[email protected]> Suggested-by: Yu Zhao <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Dan Streetman <[email protected]> Cc: Domenico Cerasuolo <[email protected]> Cc: Konrad Rzeszutek Wilk <[email protected]> Cc: Nhat Pham <[email protected]> Cc: Seth Jennings <[email protected]> Cc: Vitaly Wool <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 32b6a4a · commit b9c91c4

4 files changed, 45 insertions(+), 11 deletions(-)

include/linux/frontswap.h

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@
 struct frontswap_ops {
        void (*init)(unsigned); /* this swap type was just swapon'ed */
        int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
-       int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
+       int (*load)(unsigned, pgoff_t, struct page *, bool *); /* load a page */
        void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
        void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
 };
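The added bool * is an output parameter: the backend sets it when a
successful load also consumed the backend's copy of the page. A minimal
sketch of a hypothetical backend hook, where toy_load() and
toy_copy_to_page() are invented names for illustration:

        /* Hypothetical backend illustrating the new contract. */
        static int toy_load(unsigned type, pgoff_t offset,
                            struct page *page, bool *exclusive)
        {
                if (toy_copy_to_page(type, offset, page))  /* assumed helper */
                        return -1;      /* entry not found or error */
                /* This backend frees its copy on load; frontswap will
                 * then mark the page dirty and clear its tracking bit. */
                *exclusive = true;
                return 0;
        }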

mm/Kconfig

Lines changed: 16 additions & 0 deletions
@@ -46,6 +46,22 @@ config ZSWAP_DEFAULT_ON
          The selection made here can be overridden by using the kernel
          command line 'zswap.enabled=' option.
 
+config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
+       bool "Invalidate zswap entries when pages are loaded"
+       depends on ZSWAP
+       help
+         If selected, exclusive loads for zswap will be enabled at boot,
+         otherwise it will be disabled.
+
+         If exclusive loads are enabled, when a page is loaded from zswap,
+         the zswap entry is invalidated at once, as opposed to leaving it
+         in zswap until the swap entry is freed.
+
+         This avoids having two copies of the same page in memory
+         (compressed and uncompressed) after faulting in a page from zswap.
+         The cost is that if the page was never dirtied and needs to be
+         swapped out again, it will be re-compressed.
+
 choice
        prompt "Default compressor"
        depends on ZSWAP
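The Kconfig option only seeds the boot default; zswap.c reads it through
IS_ENABLED() (see the first mm/zswap.c hunk below). For example, building
with

        CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON=y

initializes zswap_exclusive_loads_enabled to true, and the module
parameter can still override it afterwards.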

mm/frontswap.c

Lines changed: 8 additions & 2 deletions
@@ -206,6 +206,7 @@ int __frontswap_load(struct page *page)
        int type = swp_type(entry);
        struct swap_info_struct *sis = swap_info[type];
        pgoff_t offset = swp_offset(entry);
+       bool exclusive = false;
 
        VM_BUG_ON(!frontswap_ops);
        VM_BUG_ON(!PageLocked(page));
@@ -215,9 +216,14 @@ int __frontswap_load(struct page *page)
                return -1;
 
        /* Try loading from each implementation, until one succeeds. */
-       ret = frontswap_ops->load(type, offset, page);
-       if (ret == 0)
+       ret = frontswap_ops->load(type, offset, page, &exclusive);
+       if (ret == 0) {
                inc_frontswap_loads();
+               if (exclusive) {
+                       SetPageDirty(page);
+                       __frontswap_clear(sis, offset);
+               }
+       }
        return ret;
 }
mm/zswap.c

Lines changed: 20 additions & 8 deletions
@@ -138,6 +138,10 @@ static bool zswap_non_same_filled_pages_enabled = true;
 module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
                   bool, 0644);
 
+static bool zswap_exclusive_loads_enabled = IS_ENABLED(
+               CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
+module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
+
 /*********************************
 * data structures
 **********************************/
@@ -1340,12 +1344,22 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
        goto reject;
 }
 
+static void zswap_invalidate_entry(struct zswap_tree *tree,
+                                  struct zswap_entry *entry)
+{
+       /* remove from rbtree */
+       zswap_rb_erase(&tree->rbroot, entry);
+
+       /* drop the initial reference from entry creation */
+       zswap_entry_put(tree, entry);
+}
+
 /*
  * returns 0 if the page was successfully decompressed
  * return -1 on entry not found or error
  */
 static int zswap_frontswap_load(unsigned type, pgoff_t offset,
-                               struct page *page)
+                               struct page *page, bool *exclusive)
 {
        struct zswap_tree *tree = zswap_trees[type];
        struct zswap_entry *entry;
@@ -1415,6 +1429,10 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 freeentry:
        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);
+       if (!ret && zswap_exclusive_loads_enabled) {
+               zswap_invalidate_entry(tree, entry);
+               *exclusive = true;
+       }
        spin_unlock(&tree->lock);
 
        return ret;
@@ -1434,13 +1452,7 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
                spin_unlock(&tree->lock);
                return;
        }
-
-       /* remove from rbtree */
-       zswap_rb_erase(&tree->rbroot, entry);
-
-       /* drop the initial reference from entry creation */
-       zswap_entry_put(tree, entry);
-
+       zswap_invalidate_entry(tree, entry);
        spin_unlock(&tree->lock);
 }
