
Commit 4f5b0c1

rppt authored and torvalds committed
arm, arm64: move free_unused_memmap() to generic mm
ARM and ARM64 free unused parts of the memory map just before the initialization of the page allocator. To allow holes in the memory map both architectures overload pfn_valid() and define HAVE_ARCH_PFN_VALID.

Allowing holes in the memory map for FLATMEM may be useful for small machines, such as ARC and m68k and will enable those architectures to cease using DISCONTIGMEM and still support more than one memory bank.

Move the functions that free unused memory map to generic mm and enable them in case HAVE_ARCH_PFN_VALID=y.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mike Rapoport <[email protected]>
Acked-by: Catalin Marinas <[email protected]> [arm64]
Cc: Alexey Dobriyan <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Greg Ungerer <[email protected]>
Cc: John Paul Adrian Glaubitz <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Meelis Roos <[email protected]>
Cc: Michael Schmitz <[email protected]>
Cc: Russell King <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Vineet Gupta <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 5e545df commit 4f5b0c1
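
For readers skimming the diff below: the pfn_valid() overload mentioned in the commit message is not part of this commit — both architectures already provide one, and this change only makes the memmap-freeing code generic and keys it off HAVE_ARCH_PFN_VALID. As a rough sketch of what such an override looks like (modelled on the arm implementation in arch/arm/mm/init.c at this point in time; treat names and details as illustrative, not as code from this commit):

/*
 * Illustrative sketch only. With CONFIG_HAVE_ARCH_PFN_VALID=y the
 * architecture supplies its own pfn_valid(), so pfns that fall into
 * holes of a FLATMEM memory map are reported as invalid instead of
 * being assumed to have a struct page behind them.
 */
int pfn_valid(unsigned long pfn)
{
        phys_addr_t addr = __pfn_to_phys(pfn);

        /* Reject pfns whose physical address does not round-trip. */
        if (__phys_to_pfn(addr) != pfn)
                return 0;

        /* Valid only if the address lies in memory known to memblock. */
        return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

This is what makes it safe for the generic free_unused_memmap() added to mm/memblock.c below to punch holes in the memory map: a pfn inside a freed range must never be reported as valid.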

File tree

6 files changed: +85 -152 lines

arch/Kconfig
arch/arm/Kconfig
arch/arm/mm/init.c
arch/arm64/Kconfig
arch/arm64/mm/init.c
mm/memblock.c

arch/Kconfig

Lines changed: 3 additions & 0 deletions
@@ -1044,6 +1044,9 @@ config ARCH_WANT_LD_ORPHAN_WARN
           by the linker, since the locations of such sections can change between linker
           versions.
 
+config HAVE_ARCH_PFN_VALID
+        bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"

arch/arm/Kconfig

Lines changed: 1 addition & 3 deletions
@@ -69,6 +69,7 @@ config ARM
         select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
         select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
         select HAVE_ARCH_MMAP_RND_BITS if MMU
+        select HAVE_ARCH_PFN_VALID
         select HAVE_ARCH_SECCOMP
         select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
         select HAVE_ARCH_THREAD_STRUCT_WHITELIST
@@ -1489,9 +1490,6 @@ config ARCH_SPARSEMEM_ENABLE
         bool
         select SPARSEMEM_STATIC if SPARSEMEM
 
-config HAVE_ARCH_PFN_VALID
-        def_bool y
-
 config HIGHMEM
         bool "High Memory Support"
         depends on MMU

arch/arm/mm/init.c

Lines changed: 0 additions & 78 deletions
@@ -267,83 +267,6 @@ static inline void poison_init_mem(void *s, size_t count)
                 *p++ = 0xe7fddef0;
 }
 
-static inline void __init
-free_memmap(unsigned long start_pfn, unsigned long end_pfn)
-{
-        struct page *start_pg, *end_pg;
-        phys_addr_t pg, pgend;
-
-        /*
-         * Convert start_pfn/end_pfn to a struct page pointer.
-         */
-        start_pg = pfn_to_page(start_pfn - 1) + 1;
-        end_pg = pfn_to_page(end_pfn - 1) + 1;
-
-        /*
-         * Convert to physical addresses, and
-         * round start upwards and end downwards.
-         */
-        pg = PAGE_ALIGN(__pa(start_pg));
-        pgend = __pa(end_pg) & PAGE_MASK;
-
-        /*
-         * If there are free pages between these,
-         * free the section of the memmap array.
-         */
-        if (pg < pgend)
-                memblock_free_early(pg, pgend - pg);
-}
-
-/*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
-static void __init free_unused_memmap(void)
-{
-        unsigned long start, end, prev_end = 0;
-        int i;
-
-        /*
-         * This relies on each bank being in address order.
-         * The banks are sorted previously in bootmem_init().
-         */
-        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
-#ifdef CONFIG_SPARSEMEM
-                /*
-                 * Take care not to free memmap entries that don't exist
-                 * due to SPARSEMEM sections which aren't present.
-                 */
-                start = min(start,
-                            ALIGN(prev_end, PAGES_PER_SECTION));
-#else
-                /*
-                 * Align down here since the VM subsystem insists that the
-                 * memmap entries are valid from the bank start aligned to
-                 * MAX_ORDER_NR_PAGES.
-                 */
-                start = round_down(start, MAX_ORDER_NR_PAGES);
-#endif
-                /*
-                 * If we had a previous bank, and there is a space
-                 * between the current bank and the previous, free it.
-                 */
-                if (prev_end && prev_end < start)
-                        free_memmap(prev_end, start);
-
-                /*
-                 * Align up here since the VM subsystem insists that the
-                 * memmap entries are valid from the bank end aligned to
-                 * MAX_ORDER_NR_PAGES.
-                 */
-                prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
-        }
-
-#ifdef CONFIG_SPARSEMEM
-        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
-                free_memmap(prev_end,
-                            ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
-}
-
 static void __init free_highpages(void)
 {
 #ifdef CONFIG_HIGHMEM
@@ -385,7 +308,6 @@ void __init mem_init(void)
         set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
         /* this will put all unused low memory onto the freelists */
-        free_unused_memmap();
         memblock_free_all();
 
 #ifdef CONFIG_SA1111

arch/arm64/Kconfig

Lines changed: 1 addition & 3 deletions
@@ -140,6 +140,7 @@ config ARM64
         select HAVE_ARCH_KGDB
         select HAVE_ARCH_MMAP_RND_BITS
         select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
+        select HAVE_ARCH_PFN_VALID
         select HAVE_ARCH_PREL32_RELOCATIONS
         select HAVE_ARCH_SECCOMP_FILTER
         select HAVE_ARCH_STACKLEAK
@@ -1043,9 +1044,6 @@ config ARCH_SELECT_MEMORY_MODEL
 config ARCH_FLATMEM_ENABLE
         def_bool !NUMA
 
-config HAVE_ARCH_PFN_VALID
-        def_bool y
-
 config HW_PERF_EVENTS
         def_bool y
         depends on ARM_PMU

arch/arm64/mm/init.c

Lines changed: 0 additions & 68 deletions
@@ -430,71 +430,6 @@ void __init bootmem_init(void)
         memblock_dump_all();
 }
 
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
-{
-        struct page *start_pg, *end_pg;
-        unsigned long pg, pgend;
-
-        /*
-         * Convert start_pfn/end_pfn to a struct page pointer.
-         */
-        start_pg = pfn_to_page(start_pfn - 1) + 1;
-        end_pg = pfn_to_page(end_pfn - 1) + 1;
-
-        /*
-         * Convert to physical addresses, and round start upwards and end
-         * downwards.
-         */
-        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
-        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
-
-        /*
-         * If there are free pages between these, free the section of the
-         * memmap array.
-         */
-        if (pg < pgend)
-                memblock_free(pg, pgend - pg);
-}
-
-/*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
-static void __init free_unused_memmap(void)
-{
-        unsigned long start, end, prev_end = 0;
-        int i;
-
-        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
-#ifdef CONFIG_SPARSEMEM
-                /*
-                 * Take care not to free memmap entries that don't exist due
-                 * to SPARSEMEM sections which aren't present.
-                 */
-                start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
-                /*
-                 * If we had a previous bank, and there is a space between the
-                 * current bank and the previous, free it.
-                 */
-                if (prev_end && prev_end < start)
-                        free_memmap(prev_end, start);
-
-                /*
-                 * Align up here since the VM subsystem insists that the
-                 * memmap entries are valid from the bank end aligned to
-                 * MAX_ORDER_NR_PAGES.
-                 */
-                prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
-        }
-
-#ifdef CONFIG_SPARSEMEM
-        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
-                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
-}
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
-
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much memory
  * is free. This is done after various parts of the system have claimed their
@@ -510,9 +445,6 @@ void __init mem_init(void)
 
         set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
 
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-        free_unused_memmap();
-#endif
         /* this will put all unused low memory onto the freelists */
         memblock_free_all();
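
One detail worth calling out before the generic version: the arm64 copy above was only compiled for !CONFIG_SPARSEMEM_VMEMMAP. The moved code in mm/memblock.c below replaces that preprocessor guard, and the new HAVE_ARCH_PFN_VALID dependency, with a runtime-constant check. The condition is the one from the hunk below; the explanatory comment is added here for context:

        /*
         * IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is set
         * (=y, or =m for tristate options) and to 0 otherwise, so this
         * early return is a compile-time constant and the compiler drops
         * the rest of the function body in configurations where the old
         * #ifdef/#ifndef guards would have compiled the code out.
         */
        if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
            IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
                return;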

mm/memblock.c

Lines changed: 80 additions & 0 deletions
@@ -1926,6 +1926,85 @@ static int __init early_memblock(char *p)
 }
 early_param("memblock", early_memblock);
 
+static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+        struct page *start_pg, *end_pg;
+        phys_addr_t pg, pgend;
+
+        /*
+         * Convert start_pfn/end_pfn to a struct page pointer.
+         */
+        start_pg = pfn_to_page(start_pfn - 1) + 1;
+        end_pg = pfn_to_page(end_pfn - 1) + 1;
+
+        /*
+         * Convert to physical addresses, and round start upwards and end
+         * downwards.
+         */
+        pg = PAGE_ALIGN(__pa(start_pg));
+        pgend = __pa(end_pg) & PAGE_MASK;
+
+        /*
+         * If there are free pages between these, free the section of the
+         * memmap array.
+         */
+        if (pg < pgend)
+                memblock_free(pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big. Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap(void)
+{
+        unsigned long start, end, prev_end = 0;
+        int i;
+
+        if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
+            IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
+                return;
+
+        /*
+         * This relies on each bank being in address order.
+         * The banks are sorted previously in bootmem_init().
+         */
+        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
+#ifdef CONFIG_SPARSEMEM
+                /*
+                 * Take care not to free memmap entries that don't exist
+                 * due to SPARSEMEM sections which aren't present.
+                 */
+                start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
+#else
+                /*
+                 * Align down here since the VM subsystem insists that the
+                 * memmap entries are valid from the bank start aligned to
+                 * MAX_ORDER_NR_PAGES.
+                 */
+                start = round_down(start, MAX_ORDER_NR_PAGES);
+#endif
+
+                /*
+                 * If we had a previous bank, and there is a space
+                 * between the current bank and the previous, free it.
+                 */
+                if (prev_end && prev_end < start)
+                        free_memmap(prev_end, start);
+
+                /*
+                 * Align up here since the VM subsystem insists that the
+                 * memmap entries are valid from the bank end aligned to
+                 * MAX_ORDER_NR_PAGES.
+                 */
+                prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
+        }
+
+#ifdef CONFIG_SPARSEMEM
+        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
+#endif
+}
+
 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
         int order;
@@ -2012,6 +2091,7 @@ unsigned long __init memblock_free_all(void)
 {
         unsigned long pages;
 
+        free_unused_memmap();
         reset_all_zones_managed_pages();
 
         pages = free_low_memory_core_early();
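
To make the interval arithmetic concrete, here is a small self-contained userspace model of the logic above. The bank layout, the MAX_ORDER_NR_PAGES value of 1024 and the FLATMEM (non-SPARSEMEM) path are assumptions made up for this example; only the rounding mirrors the kernel code:

#include <stdio.h>

/* Assumed value for the example (4 MiB worth of 4 KiB pages). */
#define MAX_ORDER_NR_PAGES 1024UL

static unsigned long round_down_to(unsigned long x, unsigned long a)
{
        return x & ~(a - 1);
}

static unsigned long align_up_to(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        /* Two made-up memory banks, as pfn ranges [start, end). */
        unsigned long banks[2][2] = {
                { 0x80000, 0x88000 },   /* bank 0 */
                { 0x90000, 0x98000 },   /* bank 1 */
        };
        unsigned long prev_end = 0;

        for (int i = 0; i < 2; i++) {
                /* FLATMEM path: bank start aligned down, bank end aligned up. */
                unsigned long start = round_down_to(banks[i][0], MAX_ORDER_NR_PAGES);

                if (prev_end && prev_end < start)
                        printf("would free memmap for pfns [%#lx, %#lx)\n",
                               prev_end, start);

                prev_end = align_up_to(banks[i][1], MAX_ORDER_NR_PAGES);
        }
        return 0;
}

With these numbers the program prints "would free memmap for pfns [0x88000, 0x90000)", i.e. 32768 memmap entries between the two banks; assuming a 64-byte struct page, that is roughly 2 MiB handed back to memblock by free_memmap().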
