
Commit 688035f

hnaz authored and torvalds committed
mm: don't avoid high-priority reclaim on memcg limit reclaim
Commit 246e87a ("memcg: fix get_scan_count() for small targets") sought to avoid high reclaim priorities for memcg by forcing it to scan a minimum amount of pages when lru_pages >> priority yielded nothing. This was done at a time when reclaim decisions like dirty throttling were tied to the priority level.

Nowadays, the only meaningful thing still tied to the priority dropping below DEF_PRIORITY - 2 is gating whether laptop_mode=1 is generally allowed to write. But that is from an era when direct reclaim was still allowed to call ->writepage, and kswapd nowadays avoids writes until it has scanned every clean page in the system. Potential changes to how quickly sc->may_writepage could trigger are of little concern.

Remove the force_scan stuff, as well as the ugly multi-pass target calculation that it necessitated.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Jia He <[email protected]>
Cc: Mel Gorman <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent a2d7f8e commit 688035f
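For illustration: the rounding behavior that the old force_scan hack papered over is easy to see with concrete numbers. Below is a minimal userspace sketch, not from the commit; the LRU size is made up, while DEF_PRIORITY and SWAP_CLUSTER_MAX mirror the kernel constants 12 and 32:

#include <stdio.h>

#define DEF_PRIORITY     12  /* default reclaim priority, include/linux/mmzone.h */
#define SWAP_CLUSTER_MAX 32  /* minimum reclaim batch, include/linux/swap.h */

int main(void)
{
        unsigned long size = 3000;  /* pages on one LRU list of a small memcg */
        unsigned long scan = size >> DEF_PRIORITY;

        /* 3000 >> 12 == 0: the scan target rounds down to nothing. */
        printf("scan target at DEF_PRIORITY: %lu\n", scan);

        /*
         * The removed force_scan path bumped such zero targets to a
         * minimum batch rather than letting the priority level drop:
         */
        if (!scan)
                scan = size < SWAP_CLUSTER_MAX ? size : SWAP_CLUSTER_MAX;
        printf("after the forced minimum:    %lu\n", scan);
        return 0;
}

After this patch, only offline (deleted) cgroups keep that forced minimum, via the mem_cgroup_online() check visible in the diff below; for everyone else the scan target is allowed to stay zero and the reclaim priority simply drops.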

File tree

1 file changed (+37, -57 lines)


mm/vmscan.c

Lines changed: 37 additions & 57 deletions
@@ -2123,21 +2123,8 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         unsigned long anon_prio, file_prio;
         enum scan_balance scan_balance;
         unsigned long anon, file;
-        bool force_scan = false;
         unsigned long ap, fp;
         enum lru_list lru;
-        bool some_scanned;
-        int pass;
-
-        /*
-         * If the zone or memcg is small, nr[l] can be 0. When
-         * reclaiming for a memcg, a priority drop can cause high
-         * latencies, so it's better to scan a minimum amount. When a
-         * cgroup has already been deleted, scrape out the remaining
-         * cache forcefully to get rid of the lingering state.
-         */
-        if (!global_reclaim(sc) || !mem_cgroup_online(memcg))
-                force_scan = true;
 
         /* If we have no swap space, do not bother scanning anon pages. */
         if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
@@ -2268,55 +2255,48 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         fraction[1] = fp;
         denominator = ap + fp + 1;
 out:
-        some_scanned = false;
-        /* Only use force_scan on second pass. */
-        for (pass = 0; !some_scanned && pass < 2; pass++) {
-                *lru_pages = 0;
-                for_each_evictable_lru(lru) {
-                        int file = is_file_lru(lru);
-                        unsigned long size;
-                        unsigned long scan;
-
-                        size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-                        scan = size >> sc->priority;
-
-                        if (!scan && pass && force_scan)
-                                scan = min(size, SWAP_CLUSTER_MAX);
-
-                        switch (scan_balance) {
-                        case SCAN_EQUAL:
-                                /* Scan lists relative to size */
-                                break;
-                        case SCAN_FRACT:
-                                /*
-                                 * Scan types proportional to swappiness and
-                                 * their relative recent reclaim efficiency.
-                                 */
-                                scan = div64_u64(scan * fraction[file],
-                                                        denominator);
-                                break;
-                        case SCAN_FILE:
-                        case SCAN_ANON:
-                                /* Scan one type exclusively */
-                                if ((scan_balance == SCAN_FILE) != file) {
-                                        size = 0;
-                                        scan = 0;
-                                }
-                                break;
-                        default:
-                                /* Look ma, no brain */
-                                BUG();
-                        }
+        *lru_pages = 0;
+        for_each_evictable_lru(lru) {
+                int file = is_file_lru(lru);
+                unsigned long size;
+                unsigned long scan;
 
-                        *lru_pages += size;
-                        nr[lru] = scan;
+                size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
+                scan = size >> sc->priority;
+                /*
+                 * If the cgroup's already been deleted, make sure to
+                 * scrape out the remaining cache.
+                 */
+                if (!scan && !mem_cgroup_online(memcg))
+                        scan = min(size, SWAP_CLUSTER_MAX);
 
+                switch (scan_balance) {
+                case SCAN_EQUAL:
+                        /* Scan lists relative to size */
+                        break;
+                case SCAN_FRACT:
                         /*
-                         * Skip the second pass and don't force_scan,
-                         * if we found something to scan.
+                         * Scan types proportional to swappiness and
+                         * their relative recent reclaim efficiency.
                          */
-                        some_scanned |= !!scan;
+                        scan = div64_u64(scan * fraction[file],
+                                         denominator);
+                        break;
+                case SCAN_FILE:
+                case SCAN_ANON:
+                        /* Scan one type exclusively */
+                        if ((scan_balance == SCAN_FILE) != file) {
+                                size = 0;
+                                scan = 0;
+                        }
+                        break;
+                default:
+                        /* Look ma, no brain */
+                        BUG();
                 }
+
+                *lru_pages += size;
+                nr[lru] = scan;
         }
 }
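As a footnote to the retained SCAN_FRACT case above: it splits each scan target between the anon and file lists using the fraction[]/denominator weights that get_scan_count() derives from swappiness and recent reclaim efficiency. A rough standalone sketch of just that division, not from the commit; the weights are made-up example numbers, not anything the kernel would compute:

#include <stdio.h>

typedef unsigned long long u64;

/* Userspace stand-in for the kernel's div64_u64() helper. */
static u64 div64_u64(u64 dividend, u64 divisor)
{
        return dividend / divisor;
}

int main(void)
{
        /* Hypothetical weights: anon reclaim recently cheap, file costly. */
        u64 fraction[2] = { 300, 100 };   /* [0] = anon, [1] = file */
        u64 denominator = 300 + 100 + 1;  /* ap + fp + 1, as in the diff */
        u64 scan = 512;                   /* a size >> sc->priority result */

        for (int file = 0; file < 2; file++)
                printf("%s scan target: %llu\n", file ? "file" : "anon",
                       div64_u64(scan * fraction[file], denominator));
        return 0;
}

With these numbers, 512 candidate pages split into roughly 383 anon and 127 file; the +1 in the denominator keeps the division from ever being by zero.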