Commit 77d4459

yuzhaogoogle authored and akpm00 committed
mm: multi-gen LRU: shuffle should_run_aging()
Move should_run_aging() next to its only caller left.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yu Zhao <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Michael Larabel <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
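
For orientation: nothing functional changes here. The function body is identical before and after; only its position in mm/vmscan.c moves so that it sits next to its sole surviving call site. A minimal sketch of that relationship follows; the caller is assumed to be get_nr_to_scan() based on the surrounding mm/vmscan.c code of this series, and its body below is an illustration, not code from this commit:

/*
 * Sketch only, not part of this diff: the one remaining caller is
 * assumed to be get_nr_to_scan(), which turns the boolean returned by
 * should_run_aging() into a decision between evicting now and aging first.
 */
static unsigned long get_nr_to_scan(struct lruvec *lruvec,
				    struct scan_control *sc, bool can_swap)
{
	unsigned long nr_to_scan;
	DEFINE_MAX_SEQ(lruvec);

	/* false: enough cold folios, go evict nr_to_scan of them */
	if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
		return nr_to_scan;

	/* true: short on cold folios, try to create a new generation first */
	if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
		return nr_to_scan;

	return 0;
}
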
1 parent 7348cc9 commit 77d4459

File tree

1 file changed: +62 −62 lines changed

mm/vmscan.c

Lines changed: 62 additions & 62 deletions
@@ -4467,68 +4467,6 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 	return true;
 }
 
-static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
-			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
-{
-	int gen, type, zone;
-	unsigned long old = 0;
-	unsigned long young = 0;
-	unsigned long total = 0;
-	struct lru_gen_folio *lrugen = &lruvec->lrugen;
-	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-	DEFINE_MIN_SEQ(lruvec);
-
-	/* whether this lruvec is completely out of cold folios */
-	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
-		*nr_to_scan = 0;
-		return true;
-	}
-
-	for (type = !can_swap; type < ANON_AND_FILE; type++) {
-		unsigned long seq;
-
-		for (seq = min_seq[type]; seq <= max_seq; seq++) {
-			unsigned long size = 0;
-
-			gen = lru_gen_from_seq(seq);
-
-			for (zone = 0; zone < MAX_NR_ZONES; zone++)
-				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
-
-			total += size;
-			if (seq == max_seq)
-				young += size;
-			else if (seq + MIN_NR_GENS == max_seq)
-				old += size;
-		}
-	}
-
-	/* try to scrape all its memory if this memcg was deleted */
-	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
-
-	/*
-	 * The aging tries to be lazy to reduce the overhead, while the eviction
-	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
-	 * ideal number of generations is MIN_NR_GENS+1.
-	 */
-	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
-		return false;
-
-	/*
-	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
-	 * of the total number of pages for each generation. A reasonable range
-	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
-	 * aging cares about the upper bound of hot pages, while the eviction
-	 * cares about the lower bound of cold pages.
-	 */
-	if (young * MIN_NR_GENS > total)
-		return true;
-	if (old * (MIN_NR_GENS + 2) < total)
-		return true;
-
-	return false;
-}
-
 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
 {
 	int gen, type, zone;
@@ -5112,6 +5050,68 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 	return scanned;
 }
 
+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
+{
+	int gen, type, zone;
+	unsigned long old = 0;
+	unsigned long young = 0;
+	unsigned long total = 0;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
+	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+	DEFINE_MIN_SEQ(lruvec);
+
+	/* whether this lruvec is completely out of cold folios */
+	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
+		*nr_to_scan = 0;
+		return true;
+	}
+
+	for (type = !can_swap; type < ANON_AND_FILE; type++) {
+		unsigned long seq;
+
+		for (seq = min_seq[type]; seq <= max_seq; seq++) {
+			unsigned long size = 0;
+
+			gen = lru_gen_from_seq(seq);
+
+			for (zone = 0; zone < MAX_NR_ZONES; zone++)
+				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+			total += size;
+			if (seq == max_seq)
+				young += size;
+			else if (seq + MIN_NR_GENS == max_seq)
+				old += size;
+		}
+	}
+
+	/* try to scrape all its memory if this memcg was deleted */
+	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+
+	/*
+	 * The aging tries to be lazy to reduce the overhead, while the eviction
+	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
+	 * ideal number of generations is MIN_NR_GENS+1.
+	 */
+	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
+		return false;
+
+	/*
+	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
+	 * of the total number of pages for each generation. A reasonable range
+	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
+	 * aging cares about the upper bound of hot pages, while the eviction
+	 * cares about the lower bound of cold pages.
+	 */
+	if (young * MIN_NR_GENS > total)
+		return true;
+	if (old * (MIN_NR_GENS + 2) < total)
+		return true;
+
+	return false;
+}
+
 /*
  * For future optimizations:
  * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
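
Reading the moved function's final checks with concrete numbers may help. With MIN_NR_GENS equal to 2 (its value in the kernel), the two thresholds say: run aging when the youngest generation holds more than 1/MIN_NR_GENS = 1/2 of all pages (too many hot pages), or when the oldest eligible generations hold less than 1/(MIN_NR_GENS+2) = 1/4 (too few cold pages). A standalone sketch of just that arithmetic, with page counts invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MIN_NR_GENS 2	/* matches the kernel's value for this check */

/* Distills the two threshold checks at the end of should_run_aging(). */
static bool needs_aging(unsigned long young, unsigned long old,
			unsigned long total)
{
	if (young * MIN_NR_GENS > total)	/* hot pages above 1/2 of total */
		return true;
	if (old * (MIN_NR_GENS + 2) < total)	/* cold pages below 1/4 of total */
		return true;
	return false;
}

int main(void)
{
	/* illustrative page counts, not taken from any real workload */
	printf("%d\n", needs_aging(600, 300, 1000));	/* 1: young > 1/2 of total */
	printf("%d\n", needs_aging(300, 200, 1000));	/* 1: old < 1/4 of total */
	printf("%d\n", needs_aging(300, 300, 1000));	/* 0: spread is acceptable */
	return 0;
}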
