
Commit 0a0337e

Michal Hocko authored and torvalds committed
mm, oom: rework oom detection
__alloc_pages_slowpath has traditionally relied on direct reclaim and did_some_progress as an indicator that it makes sense to retry the allocation rather than declaring OOM. shrink_zones had to rely on zone_reclaimable if shrink_zone didn't make any progress, to prevent a premature OOM killer invocation - the LRU might be full of dirty or writeback pages and direct reclaim cannot clean those up.

zone_reclaimable allows rescanning the reclaimable lists several times and restarting if a page is freed. This is really subtle behavior and it might lead to a livelock when a single freed page keeps the allocator looping but the current task is never able to allocate that single page. The OOM killer would be more appropriate than looping without any progress for an unbounded amount of time.

This patch changes the OOM detection logic and pulls it out of shrink_zone, which is too low a level for a high-level decision such as OOM, which is a per-zonelist property. It is __alloc_pages_slowpath which knows how many attempts have been made and what progress has been achieved so far, so it is the more appropriate place to implement this logic.

The new heuristic is implemented in the should_reclaim_retry helper called from __alloc_pages_slowpath. It tries to be more deterministic and easier to follow. It builds on the assumption that retrying makes sense only if the currently reclaimable memory plus free pages would allow the current allocation request to succeed (as per __zone_watermark_ok) for at least one zone in the usable zonelist.

This alone wouldn't be sufficient, though, because writeback might get stuck and reclaimable pages might be pinned for a really long time or even depend on the current allocation context. Therefore a backoff mechanism is implemented which reduces the reclaim target after each reclaim round without any progress. This means that we should eventually converge to only NR_FREE_PAGES as the target, fail the watermark check and proceed to OOM. The backoff is simple and linear: 1/16 of the reclaimable pages for each round without any progress. We are optimistic and reset the counter after successful reclaim rounds.

Costly high-order allocations mostly preserve their semantics: those without __GFP_REPEAT fail right away, while those which have the flag set back off once the number of reclaimed pages reaches the equivalent of the requested order. The only difference is that if there was no progress during the reclaim we rely on the zone watermark check, which is more logical than the previous 1<<order attempts that were a result of zone_reclaimable faking the progress.

[[email protected]: check classzone_idx for shrink_zone]
[[email protected]: separate the heuristic into should_reclaim_retry]
[[email protected]: use zone_page_state_snapshot for NR_FREE_PAGES]
[[email protected]: shrink_zones doesn't need to return anything]
Signed-off-by: Michal Hocko <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent cab1802 commit 0a0337e
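
To make the backoff described in the commit message concrete, here is a minimal userspace sketch (not kernel code) of the arithmetic used by should_reclaim_retry() in the diff below: the reclaim target shrinks by 1/16 of the reclaimable pages for every round without progress, so after MAX_RECLAIM_RETRIES fruitless rounds only the free pages remain and the watermark check fails. The page counts and the watermark value are made-up illustration numbers, and watermark_ok() is a stand-in for the kernel's __zone_watermark_ok().

	/* Userspace illustration of the linear backoff; assumptions noted above. */
	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_RECLAIM_RETRIES 16
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* Stand-in for __zone_watermark_ok(): would the request fit if we
	 * reclaimed everything counted in "available"? */
	static bool watermark_ok(unsigned long available, unsigned long min_wmark)
	{
		return available > min_wmark;
	}

	int main(void)
	{
		unsigned long reclaimable = 10000;	/* hypothetical zone_reclaimable_pages() */
		unsigned long free_pages = 500;		/* hypothetical NR_FREE_PAGES snapshot */
		unsigned long min_wmark = 2000;		/* hypothetical min watermark */
		int no_progress_loops;

		for (no_progress_loops = 0; no_progress_loops <= MAX_RECLAIM_RETRIES;
		     no_progress_loops++) {
			/* Drop 1/16 of the reclaimable pages per round without
			 * progress, so the target converges to just the free
			 * pages after MAX_RECLAIM_RETRIES rounds. */
			unsigned long available = reclaimable -
				DIV_ROUND_UP(no_progress_loops * reclaimable,
					     MAX_RECLAIM_RETRIES);
			available += free_pages;

			printf("loop %2d: available=%5lu -> %s\n", no_progress_loops,
			       available,
			       watermark_ok(available, min_wmark) ? "retry" : "go OOM");
		}
		return 0;
	}

With these numbers the sketch keeps printing "retry" while the shrinking target still clears the watermark and flips to "go OOM" once only the free pages are left, which is the convergence property the commit relies on.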

File tree

3 files changed: +97 -29 lines changed


include/linux/swap.h

Lines changed: 1 addition & 0 deletions
@@ -316,6 +316,7 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
 					    struct vm_area_struct *vma);
 
 /* linux/mm/vmscan.c */
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
 extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);

mm/page_alloc.c

Lines changed: 92 additions & 8 deletions
@@ -3386,6 +3386,77 @@ static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
 	return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
 }
 
+/*
+ * Maximum number of reclaim retries without any progress before OOM killer
+ * is consider as the only way to move forward.
+ */
+#define MAX_RECLAIM_RETRIES 16
+
+/*
+ * Checks whether it makes sense to retry the reclaim to make a forward progress
+ * for the given allocation request.
+ * The reclaim feedback represented by did_some_progress (any progress during
+ * the last reclaim round), pages_reclaimed (cumulative number of reclaimed
+ * pages) and no_progress_loops (number of reclaim rounds without any progress
+ * in a row) is considered as well as the reclaimable pages on the applicable
+ * zone list (with a backoff mechanism which is a function of no_progress_loops).
+ *
+ * Returns true if a retry is viable or false to enter the oom path.
+ */
+static inline bool
+should_reclaim_retry(gfp_t gfp_mask, unsigned order,
+		     struct alloc_context *ac, int alloc_flags,
+		     bool did_some_progress, unsigned long pages_reclaimed,
+		     int no_progress_loops)
+{
+	struct zone *zone;
+	struct zoneref *z;
+
+	/*
+	 * Make sure we converge to OOM if we cannot make any progress
+	 * several times in the row.
+	 */
+	if (no_progress_loops > MAX_RECLAIM_RETRIES)
+		return false;
+
+	if (order > PAGE_ALLOC_COSTLY_ORDER) {
+		if (pages_reclaimed >= (1<<order))
+			return false;
+
+		if (did_some_progress)
+			return true;
+	}
+
+	/*
+	 * Keep reclaiming pages while there is a chance this will lead somewhere.
+	 * If none of the target zones can satisfy our allocation request even
+	 * if all reclaimable pages are considered then we are screwed and have
+	 * to go OOM.
+	 */
+	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
+					ac->nodemask) {
+		unsigned long available;
+
+		available = zone_reclaimable_pages(zone);
+		available -= DIV_ROUND_UP(no_progress_loops * available,
+					  MAX_RECLAIM_RETRIES);
+		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+		/*
+		 * Would the allocation succeed if we reclaimed the whole
+		 * available?
+		 */
+		if (__zone_watermark_ok(zone, order, min_wmark_pages(zone),
+				ac->high_zoneidx, alloc_flags, available)) {
+			/* Wait for some write requests to complete then retry */
+			wait_iff_congested(zone, BLK_RW_ASYNC, HZ/50);
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 						struct alloc_context *ac)
@@ -3397,6 +3468,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	enum migrate_mode migration_mode = MIGRATE_ASYNC;
 	enum compact_result compact_result;
+	int no_progress_loops = 0;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3525,23 +3597,35 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (gfp_mask & __GFP_NORETRY)
 		goto noretry;
 
-	/* Keep reclaiming pages as long as there is reasonable progress */
-	pages_reclaimed += did_some_progress;
-	if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
-	    ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
-		/* Wait for some write requests to complete then retry */
-		wait_iff_congested(ac->preferred_zoneref->zone, BLK_RW_ASYNC, HZ/50);
-		goto retry;
+	/*
+	 * Do not retry costly high order allocations unless they are
+	 * __GFP_REPEAT
+	 */
+	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
+		goto noretry;
+
+	if (did_some_progress) {
+		no_progress_loops = 0;
+		pages_reclaimed += did_some_progress;
+	} else {
+		no_progress_loops++;
 	}
 
+	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
+				 did_some_progress > 0, pages_reclaimed,
+				 no_progress_loops))
+		goto retry;
+
 	/* Reclaim has failed us, start killing things */
 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
 	if (page)
 		goto got_pg;
 
 	/* Retry as long as the OOM killer is making progress */
-	if (did_some_progress)
+	if (did_some_progress) {
+		no_progress_loops = 0;
 		goto retry;
+	}
 
 noretry:
 	/*

mm/vmscan.c

Lines changed: 4 additions & 21 deletions
@@ -191,7 +191,7 @@ static bool sane_reclaim(struct scan_control *sc)
 }
 #endif
 
-static unsigned long zone_reclaimable_pages(struct zone *zone)
+unsigned long zone_reclaimable_pages(struct zone *zone)
 {
 	unsigned long nr;
 
@@ -2507,18 +2507,15 @@ static inline bool compaction_ready(struct zone *zone, int order, int classzone_
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
- *
- * Returns true if a zone was reclaimable.
  */
-static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
+static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
 	gfp_t orig_mask;
 	enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
-	bool reclaimable = false;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2583,26 +2580,17 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 							&nr_soft_scanned);
 			sc->nr_reclaimed += nr_soft_reclaimed;
 			sc->nr_scanned += nr_soft_scanned;
-			if (nr_soft_reclaimed)
-				reclaimable = true;
 			/* need some check for avoid more shrink_zone() */
 		}
 
-		if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx))
-			reclaimable = true;
-
-		if (global_reclaim(sc) &&
-		    !reclaimable && zone_reclaimable(zone))
-			reclaimable = true;
+		shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
 	}
 
 	/*
 	 * Restore to original mask to avoid the impact on the caller if we
 	 * promoted it to __GFP_HIGHMEM.
 	 */
 	sc->gfp_mask = orig_mask;
-
-	return reclaimable;
 }
 
 /*
@@ -2627,7 +2615,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	int initial_priority = sc->priority;
 	unsigned long total_scanned = 0;
 	unsigned long writeback_threshold;
-	bool zones_reclaimable;
 retry:
 	delayacct_freepages_start();
 
@@ -2638,7 +2625,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
 				sc->priority);
 		sc->nr_scanned = 0;
-		zones_reclaimable = shrink_zones(zonelist, sc);
+		shrink_zones(zonelist, sc);
 
 		total_scanned += sc->nr_scanned;
 		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
@@ -2685,10 +2672,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		goto retry;
 	}
 
-	/* Any of the zones still reclaimable? Don't OOM. */
-	if (zones_reclaimable)
-		return 1;
-
 	return 0;
 }
