
Commit c822f62

hnaz authored and torvalds committed
mm: delete NR_PAGES_SCANNED and pgdat_reclaimable()
NR_PAGES_SCANNED counts the number of pages scanned since the last page free event in the allocator. This was used primarily to measure the reclaimability of zones and nodes, and to determine when reclaim should give up on them. In that role, it has been replaced in the preceding patches by a different mechanism.

Being implemented as an efficient vmstat counter, it was automatically exported to userspace as well. It's however unlikely that anyone outside the kernel is using this counter in any meaningful way.

Remove the counter and the unused pgdat_reclaimable().

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Jia He <[email protected]>
Cc: Mel Gorman <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 688035f commit c822f62
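Context for the userspace-visibility point above: the counter was exported through /proc/vmstat under the name "nr_pages_scanned" (the vmstat_text[] entry removed in mm/vmstat.c below). The following standalone sketch is not part of this commit; it only illustrates how a monitoring tool could detect whether a running kernel still exports that field, and why any parser that reads it by name must tolerate its absence once this patch is applied.

/* Hypothetical userspace check, not part of this commit: scan /proc/vmstat
 * for the "nr_pages_scanned" field that this patch removes. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[256];

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Each field is printed as "name value\n"; match the full name. */
		if (!strncmp(line, "nr_pages_scanned ", 17)) {
			printf("kernel still exports nr_pages_scanned: %s", line);
			fclose(f);
			return 0;
		}
	}
	fclose(f);
	printf("nr_pages_scanned not exported (kernel includes this patch)\n");
	return 0;
}

On a kernel containing this commit the loop falls through, consistent with the commit message's claim that nothing outside the kernel should depend on the field.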

File tree

5 files changed: +3 -41 lines changed

include/linux/mmzone.h

Lines changed: 0 additions & 1 deletion
@@ -149,7 +149,6 @@ enum node_stat_item {
 	NR_UNEVICTABLE,		/* " " " " " */
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
-	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
 	WORKINGSET_REFAULT,
 	WORKINGSET_ACTIVATE,
 	WORKINGSET_NODERECLAIM,

mm/internal.h

Lines changed: 0 additions & 1 deletion
@@ -91,7 +91,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern bool pgdat_reclaimable(struct pglist_data *pgdat);

 /*
  * in mm/rmap.c:

mm/page_alloc.c

Lines changed: 0 additions & 11 deletions
@@ -1090,14 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned;
 	bool isolated_pageblocks;

 	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);

 	while (count) {
 		struct page *page;
@@ -1150,12 +1146,7 @@ static void free_one_page(struct zone *zone,
 			unsigned int order,
 			int migratetype)
 {
-	unsigned long nr_scanned;
 	spin_lock(&zone->lock);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-
 	if (unlikely(has_isolate_pageblock(zone) ||
 		     is_migrate_isolate(migratetype))) {
 		migratetype = get_pfnblock_migratetype(page, pfn);
@@ -4504,7 +4495,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			" writeback_tmp:%lukB"
 			" unstable:%lukB"
-			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
 			"\n",
 			pgdat->node_id,
@@ -4527,7 +4517,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
-			node_page_state(pgdat, NR_PAGES_SCANNED),
 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
 				"yes" : "no");
 	}

mm/vmscan.c

Lines changed: 0 additions & 9 deletions
@@ -230,12 +230,6 @@ unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
 	return nr;
 }

-bool pgdat_reclaimable(struct pglist_data *pgdat)
-{
-	return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
-		pgdat_reclaimable_pages(pgdat) * 6;
-}
-
 /**
  * lruvec_lru_size - Returns the number of pages on the given LRU list.
  * @lruvec: lru vector
@@ -1750,7 +1744,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	reclaim_stat->recent_scanned[file] += nr_taken;

 	if (global_reclaim(sc)) {
-		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
 		if (current_is_kswapd())
 			__count_vm_events(PGSCAN_KSWAPD, nr_scanned);
 		else
@@ -1953,8 +1946,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 	reclaim_stat->recent_scanned[file] += nr_taken;

-	if (global_reclaim(sc))
-		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
 	__count_vm_events(PGREFILL, nr_scanned);

 	spin_unlock_irq(&pgdat->lru_lock);

mm/vmstat.c

Lines changed: 3 additions & 19 deletions
@@ -954,7 +954,6 @@ const char * const vmstat_text[] = {
 	"nr_unevictable",
 	"nr_isolated_anon",
 	"nr_isolated_file",
-	"nr_pages_scanned",
 	"workingset_refault",
 	"workingset_activate",
 	"workingset_nodereclaim",
@@ -1378,15 +1377,13 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   "\n min %lu"
 		   "\n low %lu"
 		   "\n high %lu"
-		   "\n node_scanned %lu"
 		   "\n spanned %lu"
 		   "\n present %lu"
 		   "\n managed %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
-		   node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
@@ -1586,22 +1583,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 		val = atomic_long_read(&vm_zone_stat[i]);
 		if (val < 0) {
-			switch (i) {
-			case NR_PAGES_SCANNED:
-				/*
-				 * This is often seen to go negative in
-				 * recent kernels, but not to go permanently
-				 * negative.  Whilst it would be nicer not to
-				 * have exceptions, rooting them out would be
-				 * another task, of rather low priority.
-				 */
-				break;
-			default:
-				pr_warn("%s: %s %ld\n",
-					__func__, vmstat_text[i], val);
-				err = -EINVAL;
-				break;
-			}
+			pr_warn("%s: %s %ld\n",
+				__func__, vmstat_text[i], val);
+			err = -EINVAL;
 		}
 	}
 	if (err)
