Commit ed8ece2

hansendc authored and Linus Torvalds committed
[PATCH] memory hotplug prep: break out zone initialization
If a zone is empty at boot-time and then hot-added to later, it needs to run the same init code that would have been run on it at boot.

This patch breaks out zone table and per-cpu-pages functions for use by the hotplug code. You can almost see all of the free_area_init_core() function on one page now. :)

Signed-off-by: Dave Hansen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
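For context on how the broken-out helpers are meant to be reused, here is a rough sketch of what a hot-add path could look like. The function online_zone() and its shape are hypothetical, not part of this commit; note also that zone_wait_table_init() still calls alloc_bootmem_node(), so a real post-boot caller would need a runtime allocation path.

	/*
	 * Hypothetical hot-add caller (illustration only, not in this
	 * commit). A zone that was empty at boot is brought up with the
	 * same helpers free_area_init_core() now uses, instead of
	 * duplicating that logic in the hotplug code.
	 */
	static int __devinit online_zone(struct zone *zone,	/* hypothetical */
			unsigned long start_pfn, unsigned long nr_pages)
	{
		struct pglist_data *pgdat = zone->zone_pgdat;

		/* Register the zone in the zone table, as boot-time init does. */
		zonetable_add(zone, pgdat->node_id, zone_idx(zone),
				start_pfn, nr_pages);

		/* Run the boot-time zone initialization broken out by this patch. */
		init_currently_empty_zone(zone, start_pfn, nr_pages);

		return 0;
	}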
1 parent 2774812 commit ed8ece2

1 file changed: +58 −40 lines changed

mm/page_alloc.c

Lines changed: 58 additions & 40 deletions
@@ -1875,6 +1875,60 @@ void __init setup_per_cpu_pageset()
 
 #endif
 
+static __devinit
+void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
+{
+	int i;
+	struct pglist_data *pgdat = zone->zone_pgdat;
+
+	/*
+	 * The per-page waitqueue mechanism uses hashed waitqueues
+	 * per zone.
+	 */
+	zone->wait_table_size = wait_table_size(zone_size_pages);
+	zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
+	zone->wait_table = (wait_queue_head_t *)
+		alloc_bootmem_node(pgdat, zone->wait_table_size
+					* sizeof(wait_queue_head_t));
+
+	for(i = 0; i < zone->wait_table_size; ++i)
+		init_waitqueue_head(zone->wait_table + i);
+}
+
+static __devinit void zone_pcp_init(struct zone *zone)
+{
+	int cpu;
+	unsigned long batch = zone_batchsize(zone);
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+#ifdef CONFIG_NUMA
+		/* Early boot. Slab allocator not functional yet */
+		zone->pageset[cpu] = &boot_pageset[cpu];
+		setup_pageset(&boot_pageset[cpu],0);
+#else
+		setup_pageset(zone_pcp(zone,cpu), batch);
+#endif
+	}
+	printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
+		zone->name, zone->present_pages, batch);
+}
+
+static __devinit void init_currently_empty_zone(struct zone *zone,
+		unsigned long zone_start_pfn, unsigned long size)
+{
+	struct pglist_data *pgdat = zone->zone_pgdat;
+
+	zone_wait_table_init(zone, size);
+	pgdat->nr_zones = zone_idx(zone) + 1;
+
+	zone->zone_mem_map = pfn_to_page(zone_start_pfn);
+	zone->zone_start_pfn = zone_start_pfn;
+
+	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
+
+	zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+}
+
 /*
  * Set up the zone data structures:
  *  - mark all pages reserved
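As background on the "hashed waitqueues per zone" comment in zone_wait_table_init() above: consumers find the wait queue for a page by hashing the struct page pointer down to wait_table_bits bits and indexing the table. A sketch of that lookup, mirroring the page_waitqueue() helper in mm/filemap.c of this era (quoted from memory, so treat the details as approximate):

	#include <linux/hash.h>

	static wait_queue_head_t *page_waitqueue(struct page *page)
	{
		const struct zone *zone = page_zone(page);

		/*
		 * hash_ptr() mixes the page pointer down to wait_table_bits
		 * bits, selecting one of the wait_table_size queue heads
		 * that zone_wait_table_init() set up.
		 */
		return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
	}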
@@ -1884,8 +1938,8 @@ void __init setup_per_cpu_pageset()
 static void __init free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
-	unsigned long i, j;
-	int cpu, nid = pgdat->node_id;
+	unsigned long j;
+	int nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
 
 	pgdat->nr_zones = 0;
@@ -1895,7 +1949,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize;
-		unsigned long batch;
 
 		realsize = size = zones_size[j];
 		if (zholes_size)
@@ -1915,19 +1968,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 
 		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
 
-		batch = zone_batchsize(zone);
-
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-#ifdef CONFIG_NUMA
-			/* Early boot. Slab allocator not functional yet */
-			zone->pageset[cpu] = &boot_pageset[cpu];
-			setup_pageset(&boot_pageset[cpu],0);
-#else
-			setup_pageset(zone_pcp(zone,cpu), batch);
-#endif
-		}
-		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
-				zone_names[j], realsize, batch);
+		zone_pcp_init(zone);
 		INIT_LIST_HEAD(&zone->active_list);
 		INIT_LIST_HEAD(&zone->inactive_list);
 		zone->nr_scan_active = 0;
@@ -1938,32 +1979,9 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
-		/*
-		 * The per-page waitqueue mechanism uses hashed waitqueues
-		 * per zone.
-		 */
-		zone->wait_table_size = wait_table_size(size);
-		zone->wait_table_bits =
-			wait_table_bits(zone->wait_table_size);
-		zone->wait_table = (wait_queue_head_t *)
-			alloc_bootmem_node(pgdat, zone->wait_table_size
-						* sizeof(wait_queue_head_t));
-
-		for(i = 0; i < zone->wait_table_size; ++i)
-			init_waitqueue_head(zone->wait_table + i);
-
-		pgdat->nr_zones = j+1;
-
-		zone->zone_mem_map = pfn_to_page(zone_start_pfn);
-		zone->zone_start_pfn = zone_start_pfn;
-
-		memmap_init(size, nid, j, zone_start_pfn);
-
 		zonetable_add(zone, nid, j, zone_start_pfn, size);
-
+		init_currently_empty_zone(zone, zone_start_pfn, size);
 		zone_start_pfn += size;
-
-		zone_init_free_lists(pgdat, zone, zone->spanned_pages);
 	}
 }
 
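A note on the "LIFO batch" reported by zone_pcp_init()'s printk: pages move between the buddy allocator and each CPU's per-cpu list in chunks of batch pages, and allocations pop the most recently freed, cache-hot page first. A self-contained userspace model of that behavior (illustrative only; none of these names are kernel APIs):

	#include <stdio.h>

	#define PCP_BATCH 16	/* stand-in for zone_batchsize() */

	/* Toy per-cpu page list: a stack of page ids, refilled in batches. */
	struct toy_pcp {
		int pages[4 * PCP_BATCH];
		int count;
	};

	static int next_buddy_page;	/* stand-in for the buddy allocator */

	static void pcp_refill(struct toy_pcp *pcp)
	{
		int i;

		/* Pull one whole batch from the "buddy allocator" at once. */
		for (i = 0; i < PCP_BATCH; i++)
			pcp->pages[pcp->count++] = next_buddy_page++;
	}

	static int pcp_alloc(struct toy_pcp *pcp)
	{
		if (pcp->count == 0)
			pcp_refill(pcp);
		/* LIFO: hand back the most recently added (cache-hot) page. */
		return pcp->pages[--pcp->count];
	}

	static void pcp_free(struct toy_pcp *pcp, int page)
	{
		/* Freed pages go on top, so they are reused first. */
		pcp->pages[pcp->count++] = page;
	}

	int main(void)
	{
		struct toy_pcp pcp = { .count = 0 };
		int a = pcp_alloc(&pcp);	/* triggers one batch refill */
		int b = pcp_alloc(&pcp);

		pcp_free(&pcp, a);
		/* The page just freed comes straight back: LIFO reuse. */
		printf("reused page %d (freed %d, other %d)\n",
		       pcp_alloc(&pcp), a, b);
		return 0;
	}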
