@@ -74,22 +74,21 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
 	.lock = INIT_LOCAL_LOCK(lock),
 };
 
-/*
- * This path almost never happens for VM activity - pages are normally freed
- * in batches. But it gets used by networking - and for compound pages.
- */
-static void __page_cache_release(struct folio *folio)
+static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
+		unsigned long *flagsp)
 {
 	if (folio_test_lru(folio)) {
-		struct lruvec *lruvec;
-		unsigned long flags;
-
-		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
-		lruvec_del_folio(lruvec, folio);
+		folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
+		lruvec_del_folio(*lruvecp, folio);
 		__folio_clear_lru_flags(folio);
-		unlock_page_lruvec_irqrestore(lruvec, flags);
 	}
-	/* See comment on folio_test_mlocked in folios_put() */
+
+	/*
+	 * In rare cases, when truncation or holepunching raced with
+	 * munlock after VM_LOCKED was cleared, Mlocked may still be
+	 * found set here. This does not indicate a problem, unless
+	 * "unevictable_pgs_cleared" appears worryingly large.
+	 */
 	if (unlikely(folio_test_mlocked(folio))) {
 		long nr_pages = folio_nr_pages(folio);
 
@@ -99,9 +98,23 @@ static void __page_cache_release(struct folio *folio)
 	}
 }
 
+/*
+ * This path almost never happens for VM activity - pages are normally freed
+ * in batches. But it gets used by networking - and for compound pages.
+ */
+static void page_cache_release(struct folio *folio)
+{
+	struct lruvec *lruvec = NULL;
+	unsigned long flags;
+
+	__page_cache_release(folio, &lruvec, &flags);
+	if (lruvec)
+		unlock_page_lruvec_irqrestore(lruvec, flags);
+}
+
 static void __folio_put_small(struct folio *folio)
 {
-	__page_cache_release(folio);
+	page_cache_release(folio);
 	mem_cgroup_uncharge(folio);
 	free_unref_page(&folio->page, 0);
 }
@@ -115,7 +128,7 @@ static void __folio_put_large(struct folio *folio)
 	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
 	 */
 	if (!folio_test_hugetlb(folio))
-		__page_cache_release(folio);
+		page_cache_release(folio);
 	destroy_large_folio(folio);
 }
 
@@ -216,7 +229,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
 			continue;
 
-		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
+		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
 
 		folio_set_lru(folio);
@@ -999,24 +1012,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			continue;
 		}
 
-		if (folio_test_lru(folio)) {
-			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
-							&flags);
-			lruvec_del_folio(lruvec, folio);
-			__folio_clear_lru_flags(folio);
-		}
-
-		/*
-		 * In rare cases, when truncation or holepunching raced with
-		 * munlock after VM_LOCKED was cleared, Mlocked may still be
-		 * found set here. This does not indicate a problem, unless
-		 * "unevictable_pgs_cleared" appears worryingly large.
-		 */
-		if (unlikely(folio_test_mlocked(folio))) {
-			__folio_clear_mlocked(folio);
-			zone_stat_sub_folio(folio, NR_MLOCK);
-			count_vm_event(UNEVICTABLE_PGCLEARED);
-		}
+		__page_cache_release(folio, &lruvec, &flags);
 
 		if (j != i)
 			folios->folios[j] = folio;
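The pattern the patch relies on is visible in the new signatures: __page_cache_release() no longer takes and drops the lruvec lock itself, it receives the caller's cached lruvec and IRQ flags by reference, so a batched caller such as folios_put_refs() can keep the lock held across consecutive folios from the same lruvec and relock only when the lruvec changes, while the new page_cache_release() wrapper serves the single-folio path and unlocks immediately. The sketch below is a minimal user-space analogue of that shape, not kernel code: every name in it (struct zone, struct item, __item_release(), item_release(), items_release()) is invented for illustration, and a pthread mutex stands in for the IRQ-safe lruvec spinlock.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in for the lruvec: a counter protected by its own lock. */
struct zone {
	pthread_mutex_t lock;
	long nr_items;
};

struct item {
	struct zone *zone;
	bool on_list;		/* mirrors the folio_test_lru() check */
};

/*
 * Core helper: the caller owns the lock state and passes it by
 * reference, so a lock taken for one item can stay held for the next
 * (analogous to __page_cache_release() taking lruvecp/flagsp).
 */
static void __item_release(struct item *it, struct zone **lockedp)
{
	if (!it->on_list)
		return;

	/* Relock only when this item belongs to a different zone. */
	if (*lockedp != it->zone) {
		if (*lockedp)
			pthread_mutex_unlock(&(*lockedp)->lock);
		*lockedp = it->zone;
		pthread_mutex_lock(&(*lockedp)->lock);
	}
	(*lockedp)->nr_items--;
	it->on_list = false;
}

/*
 * Single-item path, analogous to page_cache_release(): drop the lock
 * right away if the helper took one.
 */
static void item_release(struct item *it)
{
	struct zone *locked = NULL;

	__item_release(it, &locked);
	if (locked)
		pthread_mutex_unlock(&locked->lock);
}

/*
 * Batched path, analogous to folios_put_refs(): the lock stays held
 * across consecutive items from the same zone and is dropped once.
 */
static void items_release(struct item **items, size_t n)
{
	struct zone *locked = NULL;

	for (size_t i = 0; i < n; i++)
		__item_release(items[i], &locked);
	if (locked)
		pthread_mutex_unlock(&locked->lock);
}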