Commit b4d02ba

davidhildenbrand authored and akpm00 committed
mm/memfd: refactor memfd_tag_pins() and memfd_wait_for_pins()
Patch series "mm: remove total_mapcount()", v2.

Let's remove the remaining user from mm/memfd.c so we can get rid of
total_mapcount().

This patch (of 2):

Both functions are the remaining users of total_mapcount(). Let's get rid
of the calls by converting the code to folios.

As it turns out, the code is unnecessarily complicated, especially:

1) We can query the number of pagecache references for a folio simply via
   folio_nr_pages(). This will handle other folio sizes in the future
   correctly.

2) The xas_set(xas, page->index + cache_count) call to increment the
   iterator for large folios is not required. Remove it.

Further, simplify the XA_CHECK_SCHED check, counting each entry exactly
once.

Memfd pages can be swapped out when using shmem; leave xa_is_value()
checks in place.

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Co-developed-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: David Hildenbrand <[email protected]>
Reviewed-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent fc4d182 commit b4d02ba

File tree: 1 file changed (+18 −29 lines)


mm/memfd.c

Lines changed: 18 additions & 29 deletions
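For context before reading the hunks, here is an annotated restatement of the accounting behind the new helper (the comment is my annotation, not part of the commit): an unpinned pagecache folio is expected to hold exactly one reference per page from the page cache itself plus one per page-table mapping, so any surplus reference indicates a pin.

#include <linux/mm.h>	/* folio_ref_count(), folio_mapcount(), folio_nr_pages() */

/*
 * Expected reference count of an unpinned pagecache folio:
 *
 *     folio_ref_count() == folio_nr_pages()   (references held by the
 *                                              page cache itself)
 *                        + folio_mapcount()   (references from page-table
 *                                              mappings)
 *
 * Any surplus is an "extra" reference -- typically a GUP pin from
 * pending direct-IO or AIO -- that write-sealing must wait out.
 */
static bool memfd_folio_has_extra_refs(struct folio *folio)
{
	return folio_ref_count(folio) - folio_mapcount(folio) !=
	       folio_nr_pages(folio);
}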
@@ -29,29 +29,25 @@
 #define MEMFD_TAG_PINNED        PAGECACHE_TAG_TOWRITE
 #define LAST_SCAN               4       /* about 150ms max */

+static bool memfd_folio_has_extra_refs(struct folio *folio)
+{
+	return folio_ref_count(folio) - folio_mapcount(folio) !=
+	       folio_nr_pages(folio);
+}
+
 static void memfd_tag_pins(struct xa_state *xas)
 {
-	struct page *page;
+	struct folio *folio;
 	int latency = 0;
-	int cache_count;

 	lru_add_drain();

 	xas_lock_irq(xas);
-	xas_for_each(xas, page, ULONG_MAX) {
-		cache_count = 1;
-		if (!xa_is_value(page) &&
-		    PageTransHuge(page) && !PageHuge(page))
-			cache_count = HPAGE_PMD_NR;
-
-		if (!xa_is_value(page) &&
-		    page_count(page) - total_mapcount(page) != cache_count)
+	xas_for_each(xas, folio, ULONG_MAX) {
+		if (!xa_is_value(folio) && memfd_folio_has_extra_refs(folio))
 			xas_set_mark(xas, MEMFD_TAG_PINNED);
-		if (cache_count != 1)
-			xas_set(xas, page->index + cache_count);

-		latency += cache_count;
-		if (latency < XA_CHECK_SCHED)
+		if (++latency < XA_CHECK_SCHED)
 			continue;
 		latency = 0;

@@ -66,24 +62,23 @@ static void memfd_tag_pins(struct xa_state *xas)
 /*
  * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
  * via get_user_pages(), drivers might have some pending I/O without any active
- * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages
+ * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all folios
  * and see whether it has an elevated ref-count. If so, we tag them and wait for
  * them to be dropped.
  * The caller must guarantee that no new user will acquire writable references
- * to those pages to avoid races.
+ * to those folios to avoid races.
  */
 static int memfd_wait_for_pins(struct address_space *mapping)
 {
 	XA_STATE(xas, &mapping->i_pages, 0);
-	struct page *page;
+	struct folio *folio;
 	int error, scan;

 	memfd_tag_pins(&xas);

 	error = 0;
 	for (scan = 0; scan <= LAST_SCAN; scan++) {
 		int latency = 0;
-		int cache_count;

 		if (!xas_marked(&xas, MEMFD_TAG_PINNED))
 			break;
@@ -95,20 +90,15 @@ static int memfd_wait_for_pins(struct address_space *mapping)

 		xas_set(&xas, 0);
 		xas_lock_irq(&xas);
-		xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) {
+		xas_for_each_marked(&xas, folio, ULONG_MAX, MEMFD_TAG_PINNED) {
 			bool clear = true;

-			cache_count = 1;
-			if (!xa_is_value(page) &&
-			    PageTransHuge(page) && !PageHuge(page))
-				cache_count = HPAGE_PMD_NR;
-
-			if (!xa_is_value(page) && cache_count !=
-			    page_count(page) - total_mapcount(page)) {
+			if (!xa_is_value(folio) &&
+			    memfd_folio_has_extra_refs(folio)) {
 				/*
 				 * On the last scan, we clean up all those tags
 				 * we inserted; but make a note that we still
-				 * found pages pinned.
+				 * found folios pinned.
 				 */
 				if (scan == LAST_SCAN)
 					error = -EBUSY;
@@ -118,8 +108,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 			if (clear)
 				xas_clear_mark(&xas, MEMFD_TAG_PINNED);

-			latency += cache_count;
-			if (latency < XA_CHECK_SCHED)
+			if (++latency < XA_CHECK_SCHED)
 				continue;
 			latency = 0;
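The effect is visible from userspace: F_ADD_SEALS with F_SEAL_WRITE on a memfd is the operation that ends up in memfd_wait_for_pins(), and it fails with EBUSY while writable references remain. A minimal, hypothetical demo (the flow is mine, not part of the commit; error handling trimmed for brevity):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Sealing is only permitted on memfds created with MFD_ALLOW_SEALING. */
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
	ftruncate(fd, 4096);

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	strcpy(p, "hello");

	/* Fails with EBUSY: a writable shared mapping still exists. */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) < 0)
		perror("F_ADD_SEALS while mapped");

	munmap(p, 4096);

	/*
	 * With the mapping gone, the kernel may still have to wait for
	 * transient pins (e.g. in-flight direct-IO) -- the
	 * memfd_wait_for_pins() loop patched above.
	 */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) == 0)
		printf("memfd is now write-sealed\n");

	close(fd);
	return 0;
}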
