@@ -1835,24 +1835,27 @@ static bool too_many_isolated(struct pglist_data *pgdat, int file,
 /*
  * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
  *
- * Returns the number of pages moved to the given lruvec.
+ * Returns the number of pages moved to the appropriate LRU list.
+ *
+ * Note: The caller must not hold any lruvec lock.
  */
-static unsigned int move_folios_to_lru(struct lruvec *lruvec,
-		struct list_head *list)
+static unsigned int move_folios_to_lru(struct list_head *list)
 {
 	int nr_pages, nr_moved = 0;
 	struct folio_batch free_folios;
+	struct lruvec *lruvec = NULL;
 
 	folio_batch_init(&free_folios);
 	while (!list_empty(list)) {
 		struct folio *folio = lru_to_folio(list);
 
+		lruvec = folio_lruvec_relock_irq(folio, lruvec);
 		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 		list_del(&folio->lru);
 		if (unlikely(!folio_evictable(folio))) {
-			spin_unlock_irq(&lruvec->lru_lock);
+			lruvec_unlock_irq(lruvec);
 			folio_putback_lru(folio);
-			spin_lock_irq(&lruvec->lru_lock);
+			lruvec = NULL;
 			continue;
 		}
 
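The per-folio relock added above is what lets the function walk a list whose folios may belong to different memcgs' lruvecs. For context (this helper is not part of the diff), folio_lruvec_relock_irq() in include/linux/memcontrol.h looks roughly like this in recent kernels: it keeps the currently held lock when the folio belongs to the same lruvec, otherwise drops it and takes the right one; passing NULL simply acquires the folio's lock.

static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		/* Fast path: the folio is on the lruvec we already hold. */
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		lruvec_unlock_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}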
@@ -1874,19 +1877,15 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 
 				folio_unqueue_deferred_split(folio);
 			if (folio_batch_add(&free_folios, folio) == 0) {
-				spin_unlock_irq(&lruvec->lru_lock);
+				lruvec_unlock_irq(lruvec);
 				mem_cgroup_uncharge_folios(&free_folios);
 				free_unref_folios(&free_folios);
-				spin_lock_irq(&lruvec->lru_lock);
+				lruvec = NULL;
 			}
 
 			continue;
 		}
 
-		/*
-		 * All pages were isolated from the same lruvec (and isolation
-		 * inhibits memcg migration).
-		 */
 		VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
 		lruvec_add_folio(lruvec, folio);
 		nr_pages = folio_nr_pages(folio);
@@ -1896,11 +1895,13 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 	}
 
 	if (free_folios.nr) {
-		spin_unlock_irq(&lruvec->lru_lock);
+		lruvec_unlock_irq(lruvec);
 		mem_cgroup_uncharge_folios(&free_folios);
 		free_unref_folios(&free_folios);
-		spin_lock_irq(&lruvec->lru_lock);
+		lruvec = NULL;
 	}
+	if (lruvec)
+		lruvec_unlock_irq(lruvec);
 
 	return nr_moved;
 }
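Taken together, the hunks above establish one locking idiom for move_folios_to_lru(): hold at most one lruvec lock at a time, reset the cached pointer to NULL whenever the lock is dropped for a slow path, and release whatever is still held on the way out. A condensed sketch of just that idiom, where needs_slow_path() and do_slow_path() are hypothetical stand-ins for the putback and batch-drain branches in the real function:

struct lruvec *lruvec = NULL;

while (!list_empty(list)) {
	struct folio *folio = lru_to_folio(list);

	/* Acquire this folio's lruvec lock, reusing the held one if it matches. */
	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (needs_slow_path(folio)) {
		lruvec_unlock_irq(lruvec);
		do_slow_path(folio);	/* must run with no lruvec lock held */
		lruvec = NULL;		/* force a fresh lock next iteration */
		continue;
	}

	lruvec_add_folio(lruvec, folio);	/* the common, locked path */
}
if (lruvec)					/* drop whatever lock survived the loop */
	lruvec_unlock_irq(lruvec);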
@@ -1967,18 +1968,18 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 
 	nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);
 
-	spin_lock_irq(&lruvec->lru_lock);
-	move_folios_to_lru(lruvec, &folio_list);
+	move_folios_to_lru(&folio_list);
 
 	__mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
 			   stat.nr_demoted);
+	local_irq_disable();
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	item = PGSTEAL_KSWAPD + reclaimer_offset();
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, nr_reclaimed);
 	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
 	__count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();
 
 	lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
 
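The statistics updates in this hunk never needed the lru_lock itself; they needed interrupts off, which spin_lock_irq() provided as a side effect. The __-prefixed vmstat helpers are the non-atomic variants that require an IRQ-off (or otherwise non-preemptible) context, so a bare IRQ-off section is an equivalent but cheaper substitute. A minimal sketch of the equivalence being relied on:

/* Before: IRQs were disabled as a side effect of taking the lock. */
spin_lock_irq(&lruvec->lru_lock);	/* local_irq_disable() + spin_lock() */
__count_vm_events(item, nr_reclaimed);	/* needs only IRQs disabled */
spin_unlock_irq(&lruvec->lru_lock);

/* After: provide the IRQ-off guarantee directly, with no lock contention. */
local_irq_disable();
__count_vm_events(item, nr_reclaimed);
local_irq_enable();

The same substitution is repeated at the remaining call sites below.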
@@ -2117,16 +2118,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	/*
 	 * Move folios back to the lru list.
 	 */
-	spin_lock_irq(&lruvec->lru_lock);
 
-	nr_activate = move_folios_to_lru(lruvec, &l_active);
-	nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
+	nr_activate = move_folios_to_lru(&l_active);
+	nr_deactivate = move_folios_to_lru(&l_inactive);
 
+	local_irq_disable();
 	__count_vm_events(PGDEACTIVATE, nr_deactivate);
 	__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
-
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();
 
 	if (nr_rotated)
 		lru_note_cost(lruvec, file, 0, nr_rotated);
@@ -4628,9 +4628,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 			list_move(&folio->lru, &clean);
 	}
 
-	spin_lock_irq(&lruvec->lru_lock);
-
-	move_folios_to_lru(lruvec, &list);
+	move_folios_to_lru(&list);
 
 	walk = current->reclaim_state->mm_walk;
 	if (walk && walk->batched) {
@@ -4641,13 +4639,13 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 	__mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
 			   stat.nr_demoted);
 
+	local_irq_disable();
 	item = PGSTEAL_KSWAPD + reclaimer_offset();
 	if (!cgroup_reclaim(sc))
 		__count_vm_events(item, reclaimed);
 	__count_memcg_events(memcg, item, reclaimed);
 	__count_vm_events(PGSTEAL_ANON + type, reclaimed);
-
-	spin_unlock_irq(&lruvec->lru_lock);
+	local_irq_enable();
 
 	list_splice_init(&clean, &list);
 