Skip to content

Commit 7b4b2a0

Browse files
jiangliu authored and torvalds committed
mm: accurately calculate zone->managed_pages for highmem zones
Commit "mm: introduce new field 'managed_pages' to struct zone" assumes that all highmem pages will be freed into the buddy system by function mem_init(). But that's not always true: some architectures may reserve some highmem pages during boot. For example, PPC may allocate highmem pages for gigantic HugeTLB pages, and several architectures have code to check the PageReserved flag to exclude highmem pages allocated during boot when freeing highmem pages into the buddy system. So treat highmem pages in the same way as normal pages, that is to: 1) reset zone->managed_pages to zero in mem_init(). 2) recalculate managed_pages when freeing pages into the buddy system. Signed-off-by: Jiang Liu <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Joonsoo Kim <[email protected]> Cc: Yinghai Lu <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Kamezawa Hiroyuki <[email protected]> Cc: Marek Szyprowski <[email protected]> Cc: "Michael S. Tsirkin" <[email protected]> Cc: <[email protected]> Cc: Arnd Bergmann <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: Chris Metcalf <[email protected]> Cc: David Howells <[email protected]> Cc: Geert Uytterhoeven <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Jeremy Fitzhardinge <[email protected]> Cc: Jianguo Wu <[email protected]> Cc: Konrad Rzeszutek Wilk <[email protected]> Cc: Michel Lespinasse <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Rusty Russell <[email protected]> Cc: Tang Chen <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Wen Congyang <[email protected]> Cc: Will Deacon <[email protected]> Cc: Yasuaki Ishimatsu <[email protected]> Cc: Russell King <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 4f9f477 commit 7b4b2a0

File tree

6 files changed

+48
-28
lines changed

6 files changed

+48
-28
lines changed

arch/metag/mm/init.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -380,6 +380,12 @@ void __init mem_init(void)
380380

381381
#ifdef CONFIG_HIGHMEM
382382
unsigned long tmp;
383+
384+
/*
385+
* Explicitly reset zone->managed_pages because highmem pages are
386+
* freed before calling free_all_bootmem_node();
387+
*/
388+
reset_all_zones_managed_pages();
383389
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
384390
free_highmem_page(pfn_to_page(tmp));
385391
num_physpages += totalhigh_pages;

arch/x86/mm/highmem_32.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#include <linux/highmem.h>
22
#include <linux/module.h>
33
#include <linux/swap.h> /* for totalram_pages */
4+
#include <linux/bootmem.h>
45

56
void *kmap(struct page *page)
67
{
@@ -121,6 +122,11 @@ void __init set_highmem_pages_init(void)
121122
struct zone *zone;
122123
int nid;
123124

125+
/*
126+
* Explicitly reset zone->managed_pages because set_highmem_pages_init()
127+
* is invoked before free_all_bootmem()
128+
*/
129+
reset_all_zones_managed_pages();
124130
for_each_zone(zone) {
125131
unsigned long zone_start_pfn, zone_end_pfn;
126132

include/linux/bootmem.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
4646

4747
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
4848
extern unsigned long free_all_bootmem(void);
49+
extern void reset_all_zones_managed_pages(void);
4950

5051
extern void free_bootmem_node(pg_data_t *pgdat,
5152
unsigned long addr,

mm/bootmem.c

Lines changed: 18 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -241,20 +241,26 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
241241
return count;
242242
}
243243

244-
static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
244+
static int reset_managed_pages_done __initdata;
245+
246+
static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
245247
{
246248
struct zone *z;
247249

248-
/*
249-
* In free_area_init_core(), highmem zone's managed_pages is set to
250-
* present_pages, and bootmem allocator doesn't allocate from highmem
251-
* zones. So there's no need to recalculate managed_pages because all
252-
* highmem pages will be managed by the buddy system. Here highmem
253-
* zone also includes highmem movable zone.
254-
*/
250+
if (reset_managed_pages_done)
251+
return;
252+
255253
for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
256-
if (!is_highmem(z))
257-
z->managed_pages = 0;
254+
z->managed_pages = 0;
255+
}
256+
257+
void __init reset_all_zones_managed_pages(void)
258+
{
259+
struct pglist_data *pgdat;
260+
261+
for_each_online_pgdat(pgdat)
262+
reset_node_managed_pages(pgdat);
263+
reset_managed_pages_done = 1;
258264
}
259265

260266
/**
@@ -266,7 +272,7 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
266272
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
267273
{
268274
register_page_bootmem_info_node(pgdat);
269-
reset_node_lowmem_managed_pages(pgdat);
275+
reset_node_managed_pages(pgdat);
270276
return free_all_bootmem_core(pgdat->bdata);
271277
}
272278

@@ -279,10 +285,8 @@ unsigned long __init free_all_bootmem(void)
279285
{
280286
unsigned long total_pages = 0;
281287
bootmem_data_t *bdata;
282-
struct pglist_data *pgdat;
283288

284-
for_each_online_pgdat(pgdat)
285-
reset_node_lowmem_managed_pages(pgdat);
289+
reset_all_zones_managed_pages();
286290

287291
list_for_each_entry(bdata, &bdata_list, list)
288292
total_pages += free_all_bootmem_core(bdata);

mm/nobootmem.c

Lines changed: 16 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -137,20 +137,25 @@ static unsigned long __init free_low_memory_core_early(void)
137137
return count;
138138
}
139139

140-
static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
140+
static int reset_managed_pages_done __initdata;
141+
142+
static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
141143
{
142144
struct zone *z;
143145

144-
/*
145-
* In free_area_init_core(), highmem zone's managed_pages is set to
146-
* present_pages, and bootmem allocator doesn't allocate from highmem
147-
* zones. So there's no need to recalculate managed_pages because all
148-
* highmem pages will be managed by the buddy system. Here highmem
149-
* zone also includes highmem movable zone.
150-
*/
146+
if (reset_managed_pages_done)
147+
return;
151148
for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
152-
if (!is_highmem(z))
153-
z->managed_pages = 0;
149+
z->managed_pages = 0;
150+
}
151+
152+
void __init reset_all_zones_managed_pages(void)
153+
{
154+
struct pglist_data *pgdat;
155+
156+
for_each_online_pgdat(pgdat)
157+
reset_node_managed_pages(pgdat);
158+
reset_managed_pages_done = 1;
154159
}
155160

156161
/**
@@ -160,10 +165,7 @@ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
160165
*/
161166
unsigned long __init free_all_bootmem(void)
162167
{
163-
struct pglist_data *pgdat;
164-
165-
for_each_online_pgdat(pgdat)
166-
reset_node_lowmem_managed_pages(pgdat);
168+
reset_all_zones_managed_pages();
167169

168170
/*
169171
* We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id

mm/page_alloc.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5232,6 +5232,7 @@ void free_highmem_page(struct page *page)
52325232
{
52335233
__free_reserved_page(page);
52345234
totalram_pages++;
5235+
page_zone(page)->managed_pages++;
52355236
totalhigh_pages++;
52365237
}
52375238
#endif

0 commit comments

Comments (0)