Skip to content

Commit 2a3cb8b

Browse files
Pavel Tatashin authored and torvalds committed
mm/sparse: delete old sparse_init and enable new one
Rename new_sparse_init() to sparse_init() which enables it. Delete old sparse_init() and all the code that became obsolete with. [[email protected]: remove unused sparse_mem_maps_populate_node()] Link: http://lkml.kernel.org/r/[email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Pavel Tatashin <[email protected]> Tested-by: Michael Ellerman <[email protected]> [powerpc] Tested-by: Oscar Salvador <[email protected]> Reviewed-by: Oscar Salvador <[email protected]> Cc: Pasha Tatashin <[email protected]> Cc: Abdul Haleem <[email protected]> Cc: Baoquan He <[email protected]> Cc: Daniel Jordan <[email protected]> Cc: Dan Williams <[email protected]> Cc: Dave Hansen <[email protected]> Cc: David Rientjes <[email protected]> Cc: Greg Kroah-Hartman <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Jan Kara <[email protected]> Cc: Jérôme Glisse <[email protected]> Cc: "Kirill A. Shutemov" <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Souptick Joarder <[email protected]> Cc: Steven Sistare <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Wei Yang <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 85c77f7 commit 2a3cb8b

File tree

4 files changed

+1
-267
lines changed

4 files changed

+1
-267
lines changed

include/linux/mm.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2665,12 +2665,6 @@ extern int randomize_va_space;
26652665
const char * arch_vma_name(struct vm_area_struct *vma);
26662666
void print_vma_addr(char *prefix, unsigned long rip);
26672667

2668-
void sparse_mem_maps_populate_node(struct page **map_map,
2669-
unsigned long pnum_begin,
2670-
unsigned long pnum_end,
2671-
unsigned long map_count,
2672-
int nodeid);
2673-
26742668
void *sparse_buffer_alloc(unsigned long size);
26752669
struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
26762670
struct vmem_altmap *altmap);

mm/Kconfig

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -118,10 +118,6 @@ config SPARSEMEM_EXTREME
118118
config SPARSEMEM_VMEMMAP_ENABLE
119119
bool
120120

121-
config SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
122-
def_bool y
123-
depends on SPARSEMEM && X86_64
124-
125121
config SPARSEMEM_VMEMMAP
126122
bool "Sparse Memory virtual memmap"
127123
depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE

mm/sparse-vmemmap.c

Lines changed: 0 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -261,24 +261,3 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
261261

262262
return map;
263263
}
264-
265-
void __init sparse_mem_maps_populate_node(struct page **map_map,
266-
unsigned long pnum_begin,
267-
unsigned long pnum_end,
268-
unsigned long map_count, int nodeid)
269-
{
270-
unsigned long pnum;
271-
int nr_consumed_maps = 0;
272-
273-
for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
274-
if (!present_section_nr(pnum))
275-
continue;
276-
277-
map_map[nr_consumed_maps] =
278-
sparse_mem_map_populate(pnum, nodeid, NULL);
279-
if (map_map[nr_consumed_maps++])
280-
continue;
281-
pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
282-
__func__);
283-
}
284-
}

mm/sparse.c

Lines changed: 1 addition & 236 deletions
Original file line numberDiff line numberDiff line change
@@ -205,12 +205,6 @@ static inline unsigned long first_present_section_nr(void)
205205
return next_present_section_nr(-1);
206206
}
207207

208-
/*
209-
* Record how many memory sections are marked as present
210-
* during system bootup.
211-
*/
212-
static int __initdata nr_present_sections;
213-
214208
/* Record a memory area against a node. */
215209
void __init memory_present(int nid, unsigned long start, unsigned long end)
216210
{
@@ -240,7 +234,6 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
240234
ms->section_mem_map = sparse_encode_early_nid(nid) |
241235
SECTION_IS_ONLINE;
242236
section_mark_present(ms);
243-
nr_present_sections++;
244237
}
245238
}
246239
}
@@ -377,37 +370,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
377370
}
378371
#endif /* CONFIG_MEMORY_HOTREMOVE */
379372

380-
static void __init sparse_early_usemaps_alloc_node(void *data,
381-
unsigned long pnum_begin,
382-
unsigned long pnum_end,
383-
unsigned long usemap_count, int nodeid)
384-
{
385-
void *usemap;
386-
unsigned long pnum;
387-
unsigned long **usemap_map = (unsigned long **)data;
388-
int size = usemap_size();
389-
int nr_consumed_maps = 0;
390-
391-
usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
392-
size * usemap_count);
393-
if (!usemap) {
394-
pr_warn("%s: allocation failed\n", __func__);
395-
return;
396-
}
397-
398-
for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
399-
if (!present_section_nr(pnum))
400-
continue;
401-
usemap_map[nr_consumed_maps] = usemap;
402-
usemap += size;
403-
check_usemap_section_nr(nodeid, usemap_map[nr_consumed_maps]);
404-
nr_consumed_maps++;
405-
}
406-
}
407-
408373
#ifdef CONFIG_SPARSEMEM_VMEMMAP
409374
static unsigned long __init section_map_size(void)
410-
411375
{
412376
return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
413377
}
@@ -432,25 +396,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
432396
BOOTMEM_ALLOC_ACCESSIBLE, nid);
433397
return map;
434398
}
435-
void __init sparse_mem_maps_populate_node(struct page **map_map,
436-
unsigned long pnum_begin,
437-
unsigned long pnum_end,
438-
unsigned long map_count, int nodeid)
439-
{
440-
unsigned long pnum;
441-
int nr_consumed_maps = 0;
442-
443-
for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
444-
if (!present_section_nr(pnum))
445-
continue;
446-
map_map[nr_consumed_maps] =
447-
sparse_mem_map_populate(pnum, nodeid, NULL);
448-
if (map_map[nr_consumed_maps++])
449-
continue;
450-
pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
451-
__func__);
452-
}
453-
}
454399
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
455400

456401
static void *sparsemap_buf __meminitdata;
@@ -489,190 +434,10 @@ void * __meminit sparse_buffer_alloc(unsigned long size)
489434
return ptr;
490435
}
491436

492-
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
493-
static void __init sparse_early_mem_maps_alloc_node(void *data,
494-
unsigned long pnum_begin,
495-
unsigned long pnum_end,
496-
unsigned long map_count, int nodeid)
497-
{
498-
struct page **map_map = (struct page **)data;
499-
500-
sparse_buffer_init(section_map_size() * map_count, nodeid);
501-
sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
502-
map_count, nodeid);
503-
sparse_buffer_fini();
504-
}
505-
#else
506-
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
507-
{
508-
struct page *map;
509-
struct mem_section *ms = __nr_to_section(pnum);
510-
int nid = sparse_early_nid(ms);
511-
512-
map = sparse_mem_map_populate(pnum, nid, NULL);
513-
if (map)
514-
return map;
515-
516-
pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
517-
__func__);
518-
return NULL;
519-
}
520-
#endif
521-
522437
void __weak __meminit vmemmap_populate_print_last(void)
523438
{
524439
}
525440

526-
/**
527-
* alloc_usemap_and_memmap - memory alloction for pageblock flags and vmemmap
528-
* @map: usemap_map for pageblock flags or mmap_map for vmemmap
529-
* @unit_size: size of map unit
530-
*/
531-
static void __init alloc_usemap_and_memmap(void (*alloc_func)
532-
(void *, unsigned long, unsigned long,
533-
unsigned long, int), void *data,
534-
int data_unit_size)
535-
{
536-
unsigned long pnum;
537-
unsigned long map_count;
538-
int nodeid_begin = 0;
539-
unsigned long pnum_begin = 0;
540-
541-
for_each_present_section_nr(0, pnum) {
542-
struct mem_section *ms;
543-
544-
ms = __nr_to_section(pnum);
545-
nodeid_begin = sparse_early_nid(ms);
546-
pnum_begin = pnum;
547-
break;
548-
}
549-
map_count = 1;
550-
for_each_present_section_nr(pnum_begin + 1, pnum) {
551-
struct mem_section *ms;
552-
int nodeid;
553-
554-
ms = __nr_to_section(pnum);
555-
nodeid = sparse_early_nid(ms);
556-
if (nodeid == nodeid_begin) {
557-
map_count++;
558-
continue;
559-
}
560-
/* ok, we need to take cake of from pnum_begin to pnum - 1*/
561-
alloc_func(data, pnum_begin, pnum,
562-
map_count, nodeid_begin);
563-
/* new start, update count etc*/
564-
nodeid_begin = nodeid;
565-
pnum_begin = pnum;
566-
data += map_count * data_unit_size;
567-
map_count = 1;
568-
}
569-
/* ok, last chunk */
570-
alloc_func(data, pnum_begin, __highest_present_section_nr+1,
571-
map_count, nodeid_begin);
572-
}
573-
574-
/*
575-
* Allocate the accumulated non-linear sections, allocate a mem_map
576-
* for each and record the physical to section mapping.
577-
*/
578-
void __init sparse_init(void)
579-
{
580-
unsigned long pnum;
581-
struct page *map;
582-
unsigned long *usemap;
583-
unsigned long **usemap_map;
584-
int size;
585-
int nr_consumed_maps = 0;
586-
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
587-
int size2;
588-
struct page **map_map;
589-
#endif
590-
591-
/* see include/linux/mmzone.h 'struct mem_section' definition */
592-
BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
593-
594-
/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
595-
set_pageblock_order();
596-
597-
/*
598-
* map is using big page (aka 2M in x86 64 bit)
599-
* usemap is less one page (aka 24 bytes)
600-
* so alloc 2M (with 2M align) and 24 bytes in turn will
601-
* make next 2M slip to one more 2M later.
602-
* then in big system, the memory will have a lot of holes...
603-
* here try to allocate 2M pages continuously.
604-
*
605-
* powerpc need to call sparse_init_one_section right after each
606-
* sparse_early_mem_map_alloc, so allocate usemap_map at first.
607-
*/
608-
size = sizeof(unsigned long *) * nr_present_sections;
609-
usemap_map = memblock_virt_alloc(size, 0);
610-
if (!usemap_map)
611-
panic("can not allocate usemap_map\n");
612-
alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
613-
(void *)usemap_map,
614-
sizeof(usemap_map[0]));
615-
616-
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
617-
size2 = sizeof(struct page *) * nr_present_sections;
618-
map_map = memblock_virt_alloc(size2, 0);
619-
if (!map_map)
620-
panic("can not allocate map_map\n");
621-
alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
622-
(void *)map_map,
623-
sizeof(map_map[0]));
624-
#endif
625-
626-
/*
627-
* The number of present sections stored in nr_present_sections
628-
* are kept the same since mem sections are marked as present in
629-
* memory_present(). In this for loop, we need check which sections
630-
* failed to allocate memmap or usemap, then clear its
631-
* ->section_mem_map accordingly. During this process, we need
632-
* increase 'nr_consumed_maps' whether its allocation of memmap
633-
* or usemap failed or not, so that after we handle the i-th
634-
* memory section, can get memmap and usemap of (i+1)-th section
635-
* correctly.
636-
*/
637-
for_each_present_section_nr(0, pnum) {
638-
struct mem_section *ms;
639-
640-
if (nr_consumed_maps >= nr_present_sections) {
641-
pr_err("nr_consumed_maps goes beyond nr_present_sections\n");
642-
break;
643-
}
644-
ms = __nr_to_section(pnum);
645-
usemap = usemap_map[nr_consumed_maps];
646-
if (!usemap) {
647-
ms->section_mem_map = 0;
648-
nr_consumed_maps++;
649-
continue;
650-
}
651-
652-
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
653-
map = map_map[nr_consumed_maps];
654-
#else
655-
map = sparse_early_mem_map_alloc(pnum);
656-
#endif
657-
if (!map) {
658-
ms->section_mem_map = 0;
659-
nr_consumed_maps++;
660-
continue;
661-
}
662-
663-
sparse_init_one_section(__nr_to_section(pnum), pnum, map,
664-
usemap);
665-
nr_consumed_maps++;
666-
}
667-
668-
vmemmap_populate_print_last();
669-
670-
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
671-
memblock_free_early(__pa(map_map), size2);
672-
#endif
673-
memblock_free_early(__pa(usemap_map), size);
674-
}
675-
676441
/*
677442
* Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
678443
* And number of present sections in this node is map_count.
@@ -726,7 +491,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
726491
* Allocate the accumulated non-linear sections, allocate a mem_map
727492
* for each and record the physical to section mapping.
728493
*/
729-
void __init new_sparse_init(void)
494+
void __init sparse_init(void)
730495
{
731496
unsigned long pnum_begin = first_present_section_nr();
732497
int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

0 commit comments

Comments
 (0)