@@ -1659,7 +1659,6 @@ static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
         struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
         struct hlist_node *hlist_safe;
         struct folio *folio, *tree_folio = NULL;
-        int nr = 0;
         int found_rmap_hlist_len;
 
         if (!prune_stale_stable_nodes ||
@@ -1686,33 +1685,26 @@ static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
                 folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
                 if (!folio)
                         continue;
-                nr += 1;
-                if (is_page_sharing_candidate(dup)) {
-                        if (!found ||
-                            dup->rmap_hlist_len > found_rmap_hlist_len) {
-                                if (found)
-                                        folio_put(tree_folio);
-                                found = dup;
-                                found_rmap_hlist_len = found->rmap_hlist_len;
-                                tree_folio = folio;
-
-                                /* skip put_page for found dup */
-                                if (!prune_stale_stable_nodes)
-                                        break;
-                                continue;
-                        }
+                /* Pick the best candidate if possible. */
+                if (!found || (is_page_sharing_candidate(dup) &&
+                    (!is_page_sharing_candidate(found) ||
+                     dup->rmap_hlist_len > found_rmap_hlist_len))) {
+                        if (found)
+                                folio_put(tree_folio);
+                        found = dup;
+                        found_rmap_hlist_len = found->rmap_hlist_len;
+                        tree_folio = folio;
+                        /* skip put_page for found candidate */
+                        if (!prune_stale_stable_nodes &&
+                            is_page_sharing_candidate(found))
+                                break;
+                        continue;
                 }
                 folio_put(folio);
         }
 
         if (found) {
-                /*
-                 * nr is counting all dups in the chain only if
-                 * prune_stale_stable_nodes is true, otherwise we may
-                 * break the loop at nr == 1 even if there are
-                 * multiple entries.
-                 */
-                if (prune_stale_stable_nodes && nr == 1) {
+                if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) {
                         /*
                          * If there's not just one entry it would
                          * corrupt memory, better BUG_ON. In KSM
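The rewritten loop folds the old nested tests into a single preference rule: keep the first dup seen so the walk always has a folio to compare against, then trade up when a sharing candidate displaces a non-candidate, or a candidate with a longer rmap_hlist_len appears. A minimal userspace sketch of just that rule, with a stand-in struct and helper (struct dup and prefer() are illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for ksm_stable_node: only the fields the rule reads. */
struct dup {
        int rmap_hlist_len;     /* rmap items hanging off this dup */
        bool candidate;         /* models is_page_sharing_candidate() */
};

/* True if @dup should replace @found, mirroring the patched condition. */
static bool prefer(const struct dup *dup, const struct dup *found)
{
        if (!found)
                return true;    /* any dup beats none */
        return dup->candidate &&
               (!found->candidate ||
                dup->rmap_hlist_len > found->rmap_hlist_len);
}

int main(void)
{
        struct dup full  = { .rmap_hlist_len = 256, .candidate = false };
        struct dup small = { .rmap_hlist_len = 3,   .candidate = true  };
        struct dup big   = { .rmap_hlist_len = 9,   .candidate = true  };
        const struct dup *chain[] = { &full, &small, &big };
        const struct dup *found = NULL;

        for (int i = 0; i < 3; i++)
                if (prefer(chain[i], found))
                        found = chain[i];
        /* prints 9: the best sharing candidate wins, not the fullest dup */
        printf("winner len=%d\n", found->rmap_hlist_len);
        return 0;
}

Note the asymmetry the old loop lacked: a full, non-candidate dup can now be held as found, so the caller still gets a folio to compare, but the break short-circuit also requires is_page_sharing_candidate(found), so the search keeps going until a real candidate turns up.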
@@ -1764,25 +1756,15 @@ static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
                         hlist_add_head(&found->hlist_dup,
                                        &stable_node->hlist);
                 }
+        } else {
+                /* Its hlist must be empty if no one found. */
+                free_stable_node_chain(stable_node, root);
         }
 
         *_stable_node_dup = found;
         return tree_folio;
 }
 
-static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
-                                        struct rb_root *root)
-{
-        if (!is_stable_node_chain(stable_node))
-                return stable_node;
-        if (hlist_empty(&stable_node->hlist)) {
-                free_stable_node_chain(stable_node, root);
-                return NULL;
-        }
-        return hlist_entry(stable_node->hlist.first,
-                           typeof(*stable_node), hlist_dup);
-}
-
 /*
  * Like for ksm_get_folio, this function can free the *_stable_node and
  * *_stable_node_dup if the returned tree_page is NULL.
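The nr == 1 test that hlist_is_singular_node() replaces could only be trusted after a full walk of the chain, which is why the old code gated the collapse on prune_stale_stable_nodes; the hlist check answers "is this the only entry?" in O(1) from the node itself, so the collapse (and the empty-chain free in the else branch above) can run on every path. A self-contained sketch of its semantics, with the hlist types pared down to the fields the test reads (hlist_is_singular_node() itself is a real helper in include/linux/list.h):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Pared-down hlist types: just the fields the test reads. */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        n->next = h->first;
        if (h->first)
                h->first->pprev = &n->next;
        n->pprev = &h->first;
        h->first = n;
}

/* Same test as the kernel helper: @n is the sole entry of @h iff it has
 * no successor and its pprev points back at the list head. */
static bool hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
{
        return !n->next && n->pprev == &h->first;
}

int main(void)
{
        struct hlist_head h = { .first = NULL };
        struct hlist_node a, b;

        hlist_add_head(&a, &h);
        printf("%d\n", hlist_is_singular_node(&a, &h)); /* 1 */
        hlist_add_head(&b, &h);
        printf("%d\n", hlist_is_singular_node(&a, &h)); /* 0: a's pprev points into b */
        printf("%d\n", hlist_is_singular_node(&b, &h)); /* 0: b has a successor */
        return 0;
}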
@@ -1803,17 +1785,10 @@ static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_d
                                              bool prune_stale_stable_nodes)
 {
         struct ksm_stable_node *stable_node = *_stable_node;
+
         if (!is_stable_node_chain(stable_node)) {
-                if (is_page_sharing_candidate(stable_node)) {
-                        *_stable_node_dup = stable_node;
-                        return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
-                }
-                /*
-                 * _stable_node_dup set to NULL means the stable_node
-                 * reached the ksm_max_page_sharing limit.
-                 */
-                *_stable_node_dup = NULL;
-                return NULL;
+                *_stable_node_dup = stable_node;
+                return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
         }
         return stable_node_dup(_stable_node_dup, _stable_node, root,
                                prune_stale_stable_nodes);
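With the candidate check gone from __stable_node_chain(), a non-chain stable_node is handed back even when it has reached the ksm_max_page_sharing limit: the NULL out-pointer no longer doubles as an over-the-limit flag, and the one caller that cares applies is_page_sharing_candidate() itself (see the stable_tree_search() hunk below). A toy contrast of the two contracts, with made-up names and types for illustration:

#include <stdbool.h>
#include <stdio.h>

struct node { bool candidate; };

/* Old-style: NULL encodes "over the limit", losing the node itself. */
static struct node *lookup_old(struct node *n)
{
        return n->candidate ? n : NULL;
}

/* New-style: always hand the node back; callers test what they care about. */
static struct node *lookup_new(struct node *n)
{
        return n;
}

int main(void)
{
        struct node full = { .candidate = false };      /* at the sharing limit */

        printf("old: %s\n", lookup_old(&full) ? "node" : "NULL"); /* NULL */

        struct node *n = lookup_new(&full);
        printf("new: candidate=%d\n", n->candidate);    /* 0, node still usable */
        return 0;
}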
@@ -1827,16 +1802,10 @@ static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
 }
 
 static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
-                                           struct ksm_stable_node *s_n,
+                                           struct ksm_stable_node **s_n,
                                            struct rb_root *root)
 {
-        struct ksm_stable_node *old_stable_node = s_n;
-        struct folio *tree_folio;
-
-        tree_folio = __stable_node_chain(s_n_d, &s_n, root, false);
-        /* not pruning dups so s_n cannot have changed */
-        VM_BUG_ON(s_n != old_stable_node);
-        return tree_folio;
+        return __stable_node_chain(s_n_d, s_n, root, false);
 }
 
 /*
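chain() used to take s_n by value and assert that the callee never changed it; now that stable_node_dup() can collapse a singular chain or free an empty one regardless of prune_stale_stable_nodes, the caller's pointer has to be updatable, hence the switch to struct ksm_stable_node **. A generic sketch of the pointer-to-pointer out-parameter pattern, outside any kernel context (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; };

/* The callee may swap out the object it was handed, e.g. after freeing
 * it and promoting a replacement; the extra level of indirection lets
 * the caller's own pointer follow along. */
static void maybe_collapse(struct node **np)
{
        if ((*np)->id == 0) {           /* stand-in for "chain collapsed" */
                struct node *promoted = malloc(sizeof(*promoted));

                promoted->id = 42;
                free(*np);
                *np = promoted;         /* caller's pointer updated too */
        }
}

int main(void)
{
        struct node *n = malloc(sizeof(*n));

        n->id = 0;
        maybe_collapse(&n);             /* pass the pointer by reference */
        printf("id=%d\n", n->id);       /* 42, no dangling pointer left */
        free(n);
        return 0;
}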
@@ -1854,7 +1823,7 @@ static struct page *stable_tree_search(struct page *page)
         struct rb_root *root;
         struct rb_node **new;
         struct rb_node *parent;
-        struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
+        struct ksm_stable_node *stable_node, *stable_node_dup;
         struct ksm_stable_node *page_node;
         struct folio *folio;
 
@@ -1878,45 +1847,7 @@ static struct page *stable_tree_search(struct page *page)
 
                 cond_resched();
                 stable_node = rb_entry(*new, struct ksm_stable_node, node);
-                stable_node_any = NULL;
                 tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
-                /*
-                 * NOTE: stable_node may have been freed by
-                 * chain_prune() if the returned stable_node_dup is
-                 * not NULL. stable_node_dup may have been inserted in
-                 * the rbtree instead as a regular stable_node (in
-                 * order to collapse the stable_node chain if a single
-                 * stable_node dup was found in it). In such case the
-                 * stable_node is overwritten by the callee to point
-                 * to the stable_node_dup that was collapsed in the
-                 * stable rbtree and stable_node will be equal to
-                 * stable_node_dup like if the chain never existed.
-                 */
-                if (!stable_node_dup) {
-                        /*
-                         * Either all stable_node dups were full in
-                         * this stable_node chain, or this chain was
-                         * empty and should be rb_erased.
-                         */
-                        stable_node_any = stable_node_dup_any(stable_node,
-                                                              root);
-                        if (!stable_node_any) {
-                                /* rb_erase just run */
-                                goto again;
-                        }
-                        /*
-                         * Take any of the stable_node dups page of
-                         * this stable_node chain to let the tree walk
-                         * continue. All KSM pages belonging to the
-                         * stable_node dups in a stable_node chain
-                         * have the same content and they're
-                         * write protected at all times. Any will work
-                         * fine to continue the walk.
-                         */
-                        tree_folio = ksm_get_folio(stable_node_any,
-                                                   KSM_GET_FOLIO_NOLOCK);
-                }
-                VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
                 if (!tree_folio) {
                         /*
                          * If we walked over a stale stable_node,
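The deleted VM_BUG_ON used the !a ^ !!b idiom: ! and !! normalize the two pointers to 0 or 1, and the XOR is false exactly when one is set and the other is NULL, so the assertion fired whenever both or neither were set. Since stable_node_dup is now always valid when a folio is returned, there is no two-variable invariant left to assert. A standalone illustration of the idiom (toy values only):

#include <stdbool.h>
#include <stdio.h>

/* True iff exactly one of the two pointers is non-NULL: the condition
 * the old VM_BUG_ON enforced by rejecting every other combination. */
static bool exactly_one(const void *dup, const void *any)
{
        return !(!dup ^ !!any);
}

int main(void)
{
        int x = 0;

        printf("%d\n", exactly_one(&x, NULL));  /* 1: dup set, any NULL */
        printf("%d\n", exactly_one(NULL, &x));  /* 1: any set, dup NULL */
        printf("%d\n", exactly_one(&x, &x));    /* 0: both set -> would BUG */
        printf("%d\n", exactly_one(NULL, NULL));/* 0: both NULL -> would BUG */
        return 0;
}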
@@ -1954,7 +1885,7 @@ static struct page *stable_tree_search(struct page *page)
                 goto chain_append;
         }
 
-        if (!stable_node_dup) {
+        if (!is_page_sharing_candidate(stable_node_dup)) {
                 /*
                  * If the stable_node is a chain and
                  * we got a payload match in memcmp
@@ -2063,9 +1994,6 @@ static struct page *stable_tree_search(struct page *page)
         return &folio->page;
 
 chain_append:
-        /* stable_node_dup could be null if it reached the limit */
-        if (!stable_node_dup)
-                stable_node_dup = stable_node_any;
         /*
          * If stable_node was a chain and chain_prune collapsed it,
          * stable_node has been updated to be the new regular
@@ -2110,7 +2038,7 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
         struct rb_root *root;
         struct rb_node **new;
         struct rb_node *parent;
-        struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
+        struct ksm_stable_node *stable_node, *stable_node_dup;
         bool need_chain = false;
 
         kpfn = folio_pfn(kfolio);
@@ -2126,33 +2054,7 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
 
                 cond_resched();
                 stable_node = rb_entry(*new, struct ksm_stable_node, node);
-                stable_node_any = NULL;
-                tree_folio = chain(&stable_node_dup, stable_node, root);
-                if (!stable_node_dup) {
-                        /*
-                         * Either all stable_node dups were full in
-                         * this stable_node chain, or this chain was
-                         * empty and should be rb_erased.
-                         */
-                        stable_node_any = stable_node_dup_any(stable_node,
-                                                              root);
-                        if (!stable_node_any) {
-                                /* rb_erase just run */
-                                goto again;
-                        }
-                        /*
-                         * Take any of the stable_node dups page of
-                         * this stable_node chain to let the tree walk
-                         * continue. All KSM pages belonging to the
-                         * stable_node dups in a stable_node chain
-                         * have the same content and they're
-                         * write protected at all times. Any will work
-                         * fine to continue the walk.
-                         */
-                        tree_folio = ksm_get_folio(stable_node_any,
-                                                   KSM_GET_FOLIO_NOLOCK);
-                }
-                VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
+                tree_folio = chain(&stable_node_dup, &stable_node, root);
                 if (!tree_folio) {
                         /*
                          * If we walked over a stale stable_node,