Commit d6530a1

Muchun Song authored and jfvogel committed
mm: vmscan: rework move_pages_to_lru()
In a later patch, we will reparent the LRU pages. The pages moved to the
appropriate LRU list can be reparented during move_pages_to_lru(), so it is
wrong for the caller to hold a lruvec lock across the call; use the more
general interface folio_lruvec_relock_irq() to acquire the correct lruvec
lock instead.

Signed-off-by: Muchun Song <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
Link: https://lore.kernel.org/all/[email protected]/

Orabug: 37405594

Conflicts:
    mm/vmscan.c
    (Because UEK-8 uses the folio interface; also, since UEK-8 has
    evict_folios(), the invocation of move_folios_to_lru() was modified
    accordingly.)

Signed-off-by: Imran Khan <[email protected]>
Reviewed-by: Kamalesh Babulal <[email protected]>
1 parent e8487c5 commit d6530a1
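
For context, the core of the rework is a per-folio relock pattern: move_folios_to_lru() no longer expects the caller to pin a single lruvec lock around the whole walk, but lazily (re)takes the lock of whichever lruvec each folio currently belongs to. A minimal sketch of that pattern, assuming the mainline folio helpers are available; the function name walk_private_list() and the loop body are illustrative only, not the UEK-8 code:

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/memcontrol.h>

/* Illustrative walk over a private folio list whose members may belong
 * to different lruvecs (e.g. once LRU pages can be reparented). */
static void walk_private_list(struct list_head *list)
{
        struct lruvec *lruvec = NULL;

        while (!list_empty(list)) {
                struct folio *folio = lru_to_folio(list);

                /* Drop the previously held lock (if any) and take the
                 * lock of the lruvec this folio belongs to right now. */
                lruvec = folio_lruvec_relock_irq(folio, lruvec);

                list_del(&folio->lru);
                lruvec_add_folio(lruvec, folio);
        }

        /* Release whatever lock the last iteration left held. */
        if (lruvec)
                lruvec_unlock_irq(lruvec);
}

Resetting lruvec to NULL after every explicit unlock, as the hunks below do, keeps the same invariant: the next folio_lruvec_relock_irq() call then takes a fresh lock instead of assuming the previous one is still held.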

File tree

1 file changed: +24 −26 lines

mm/vmscan.c

Lines changed: 24 additions & 26 deletions
@@ -1835,24 +1835,27 @@ static bool too_many_isolated(struct pglist_data *pgdat, int file,
 /*
  * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
  *
- * Returns the number of pages moved to the given lruvec.
+ * Returns the number of pages moved to the appropriate LRU list.
+ *
+ * Note: The caller must not hold any lruvec lock.
  */
-static unsigned int move_folios_to_lru(struct lruvec *lruvec,
-                                       struct list_head *list)
+static unsigned int move_folios_to_lru(struct list_head *list)
 {
         int nr_pages, nr_moved = 0;
         struct folio_batch free_folios;
+        struct lruvec *lruvec = NULL;
 
         folio_batch_init(&free_folios);
         while (!list_empty(list)) {
                 struct folio *folio = lru_to_folio(list);
 
+                lruvec = folio_lruvec_relock_irq(folio, lruvec);
                 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
                 list_del(&folio->lru);
                 if (unlikely(!folio_evictable(folio))) {
-                        spin_unlock_irq(&lruvec->lru_lock);
+                        lruvec_unlock_irq(lruvec);
                         folio_putback_lru(folio);
-                        spin_lock_irq(&lruvec->lru_lock);
+                        lruvec = NULL;
                         continue;
                 }
 
@@ -1874,19 +1877,15 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 
                         folio_unqueue_deferred_split(folio);
                         if (folio_batch_add(&free_folios, folio) == 0) {
-                                spin_unlock_irq(&lruvec->lru_lock);
+                                lruvec_unlock_irq(lruvec);
                                 mem_cgroup_uncharge_folios(&free_folios);
                                 free_unref_folios(&free_folios);
-                                spin_lock_irq(&lruvec->lru_lock);
+                                lruvec = NULL;
                         }
 
                         continue;
                 }
 
-                /*
-                 * All pages were isolated from the same lruvec (and isolation
-                 * inhibits memcg migration).
-                 */
                 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
                 lruvec_add_folio(lruvec, folio);
                 nr_pages = folio_nr_pages(folio);
@@ -1896,11 +1895,13 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
         }
 
         if (free_folios.nr) {
-                spin_unlock_irq(&lruvec->lru_lock);
+                lruvec_unlock_irq(lruvec);
                 mem_cgroup_uncharge_folios(&free_folios);
                 free_unref_folios(&free_folios);
-                spin_lock_irq(&lruvec->lru_lock);
+                lruvec = NULL;
         }
+        if (lruvec)
+                lruvec_unlock_irq(lruvec);
 
         return nr_moved;
 }
@@ -1967,18 +1968,18 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 
         nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);
 
-        spin_lock_irq(&lruvec->lru_lock);
-        move_folios_to_lru(lruvec, &folio_list);
+        move_folios_to_lru(&folio_list);
 
         __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
                            stat.nr_demoted);
+        local_irq_disable();
         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
         item = PGSTEAL_KSWAPD + reclaimer_offset();
         if (!cgroup_reclaim(sc))
                 __count_vm_events(item, nr_reclaimed);
         __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
         __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-        spin_unlock_irq(&lruvec->lru_lock);
+        local_irq_enable();
 
         lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
 
@@ -2117,16 +2118,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
         /*
          * Move folios back to the lru list.
          */
-        spin_lock_irq(&lruvec->lru_lock);
 
-        nr_activate = move_folios_to_lru(lruvec, &l_active);
-        nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
+        nr_activate = move_folios_to_lru(&l_active);
+        nr_deactivate = move_folios_to_lru(&l_inactive);
 
+        local_irq_disable();
         __count_vm_events(PGDEACTIVATE, nr_deactivate);
         __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
-
         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-        spin_unlock_irq(&lruvec->lru_lock);
+        local_irq_enable();
 
         if (nr_rotated)
                 lru_note_cost(lruvec, file, 0, nr_rotated);
@@ -4628,9 +4628,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
                 list_move(&folio->lru, &clean);
         }
 
-        spin_lock_irq(&lruvec->lru_lock);
-
-        move_folios_to_lru(lruvec, &list);
+        move_folios_to_lru(&list);
 
         walk = current->reclaim_state->mm_walk;
         if (walk && walk->batched) {
@@ -4641,13 +4639,13 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
         __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
                            stat.nr_demoted);
 
+        local_irq_disable();
         item = PGSTEAL_KSWAPD + reclaimer_offset();
         if (!cgroup_reclaim(sc))
                 __count_vm_events(item, reclaimed);
         __count_memcg_events(memcg, item, reclaimed);
         __count_vm_events(PGSTEAL_ANON + type, reclaimed);
-
-        spin_unlock_irq(&lruvec->lru_lock);
+        local_irq_enable();
 
         list_splice_init(&clean, &list);
 