Skip to content

Commit f1ee018

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
mm: use __page_cache_release() in folios_put()
Pass a pointer to the lruvec so we can take advantage of the
folio_lruvec_relock_irqsave().  Adjust the calling convention of
folio_lruvec_relock_irqsave() to suit and add a page_cache_release()
wrapper.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Ryan Roberts <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 24835f8 commit f1ee018

File tree

2 files changed

+37
-41
lines changed

2 files changed

+37
-41
lines changed

include/linux/memcontrol.h

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1705,18 +1705,18 @@ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
17051705
return folio_lruvec_lock_irq(folio);
17061706
}
17071707

1708-
/* Don't lock again iff page's lruvec locked */
1709-
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
1710-
struct lruvec *locked_lruvec, unsigned long *flags)
1708+
/* Don't lock again iff folio's lruvec locked */
1709+
static inline void folio_lruvec_relock_irqsave(struct folio *folio,
1710+
struct lruvec **lruvecp, unsigned long *flags)
17111711
{
1712-
if (locked_lruvec) {
1713-
if (folio_matches_lruvec(folio, locked_lruvec))
1714-
return locked_lruvec;
1712+
if (*lruvecp) {
1713+
if (folio_matches_lruvec(folio, *lruvecp))
1714+
return;
17151715

1716-
unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
1716+
unlock_page_lruvec_irqrestore(*lruvecp, *flags);
17171717
}
17181718

1719-
return folio_lruvec_lock_irqsave(folio, flags);
1719+
*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
17201720
}
17211721

17221722
#ifdef CONFIG_CGROUP_WRITEBACK

mm/swap.c

Lines changed: 29 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -74,22 +74,21 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
7474
.lock = INIT_LOCAL_LOCK(lock),
7575
};
7676

77-
/*
78-
* This path almost never happens for VM activity - pages are normally freed
79-
* in batches. But it gets used by networking - and for compound pages.
80-
*/
81-
static void __page_cache_release(struct folio *folio)
77+
static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
78+
unsigned long *flagsp)
8279
{
8380
if (folio_test_lru(folio)) {
84-
struct lruvec *lruvec;
85-
unsigned long flags;
86-
87-
lruvec = folio_lruvec_lock_irqsave(folio, &flags);
88-
lruvec_del_folio(lruvec, folio);
81+
folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
82+
lruvec_del_folio(*lruvecp, folio);
8983
__folio_clear_lru_flags(folio);
90-
unlock_page_lruvec_irqrestore(lruvec, flags);
9184
}
92-
/* See comment on folio_test_mlocked in folios_put() */
85+
86+
/*
87+
* In rare cases, when truncation or holepunching raced with
88+
* munlock after VM_LOCKED was cleared, Mlocked may still be
89+
* found set here. This does not indicate a problem, unless
90+
* "unevictable_pgs_cleared" appears worryingly large.
91+
*/
9392
if (unlikely(folio_test_mlocked(folio))) {
9493
long nr_pages = folio_nr_pages(folio);
9594

@@ -99,9 +98,23 @@ static void __page_cache_release(struct folio *folio)
9998
}
10099
}
101100

101+
/*
102+
* This path almost never happens for VM activity - pages are normally freed
103+
* in batches. But it gets used by networking - and for compound pages.
104+
*/
105+
static void page_cache_release(struct folio *folio)
106+
{
107+
struct lruvec *lruvec = NULL;
108+
unsigned long flags;
109+
110+
__page_cache_release(folio, &lruvec, &flags);
111+
if (lruvec)
112+
unlock_page_lruvec_irqrestore(lruvec, flags);
113+
}
114+
102115
static void __folio_put_small(struct folio *folio)
103116
{
104-
__page_cache_release(folio);
117+
page_cache_release(folio);
105118
mem_cgroup_uncharge(folio);
106119
free_unref_page(&folio->page, 0);
107120
}
@@ -115,7 +128,7 @@ static void __folio_put_large(struct folio *folio)
115128
* be called for hugetlb (it has a separate hugetlb_cgroup.)
116129
*/
117130
if (!folio_test_hugetlb(folio))
118-
__page_cache_release(folio);
131+
page_cache_release(folio);
119132
destroy_large_folio(folio);
120133
}
121134

@@ -216,7 +229,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
216229
if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
217230
continue;
218231

219-
lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
232+
folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
220233
move_fn(lruvec, folio);
221234

222235
folio_set_lru(folio);
@@ -999,24 +1012,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
9991012
continue;
10001013
}
10011014

1002-
if (folio_test_lru(folio)) {
1003-
lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
1004-
&flags);
1005-
lruvec_del_folio(lruvec, folio);
1006-
__folio_clear_lru_flags(folio);
1007-
}
1008-
1009-
/*
1010-
* In rare cases, when truncation or holepunching raced with
1011-
* munlock after VM_LOCKED was cleared, Mlocked may still be
1012-
* found set here. This does not indicate a problem, unless
1013-
* "unevictable_pgs_cleared" appears worryingly large.
1014-
*/
1015-
if (unlikely(folio_test_mlocked(folio))) {
1016-
__folio_clear_mlocked(folio);
1017-
zone_stat_sub_folio(folio, NR_MLOCK);
1018-
count_vm_event(UNEVICTABLE_PGCLEARED);
1019-
}
1015+
__page_cache_release(folio, &lruvec, &flags);
10201016

10211017
if (j != i)
10221018
folios->folios[j] = folio;

0 commit comments

Comments
 (0)