@@ -439,8 +439,8 @@ static unsigned long __init section_map_size(void)
 	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 }
 
-struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
-		struct vmem_altmap *altmap)
+struct page __init *__populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
 	unsigned long size = section_map_size();
 	struct page *map = sparse_buffer_alloc(size);
@@ -521,10 +521,13 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
 	}
 	sparse_buffer_init(map_count * section_map_size(), nid);
 	for_each_present_section_nr(pnum_begin, pnum) {
+		unsigned long pfn = section_nr_to_pfn(pnum);
+
 		if (pnum >= pnum_end)
 			break;
 
-		map = sparse_mem_map_populate(pnum, nid, NULL);
+		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
+				nid, NULL);
 		if (!map) {
 			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
 			       __func__, nid);
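
Both hunks above swap a section number for an explicit (pfn, nr_pages) pair: a whole-section call is just the section's first pfn plus PAGES_PER_SECTION, while the new form can also describe smaller ranges. A minimal standalone sketch of that arithmetic, assuming the x86-64 defaults SECTION_SIZE_BITS = 27 and PAGE_SHIFT = 12 (both vary by architecture and config):

#include <stdio.h>

#define SECTION_SIZE_BITS	27	/* assumption: x86-64 default */
#define PAGE_SHIFT		12	/* assumption: x86-64 default */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)

/* section_nr_to_pfn() is a pure shift: the first pfn of a section. */
static unsigned long section_nr_to_pfn(unsigned long pnum)
{
	return pnum << PFN_SECTION_SHIFT;
}

int main(void)
{
	unsigned long pnum = 3;
	unsigned long pfn = section_nr_to_pfn(pnum);

	/* The old per-section call sparse_mem_map_populate(pnum, ...)
	 * maps onto the new interface as this full-section range. */
	printf("section %lu -> pfn [%lu, %lu), %lu pages\n",
	       pnum, pfn, pfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
	return 0;
}
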
@@ -625,17 +628,17 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-		struct vmem_altmap *altmap)
+static struct page *populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
-	/* This will make the necessary allocations eventually. */
-	return sparse_mem_map_populate(pnum, nid, altmap);
+	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap,
+
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 		struct vmem_altmap *altmap)
 {
-	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
+	unsigned long start = (unsigned long) pfn_to_page(pfn);
+	unsigned long end = start + nr_pages * sizeof(struct page);
 
 	vmemmap_free(start, end, altmap);
 }
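
With nr_pages in hand, depopulate_section_memmap() computes the memmap's virtual extent directly: the range starts at the struct page for the first pfn and spans nr_pages * sizeof(struct page) bytes, instead of being hard-wired to PAGES_PER_SECTION. A userspace model of that range computation (the 64-byte struct page and the flat-array pfn_to_page() are assumptions for the sketch):

#include <stdio.h>

/* Assumption: struct page modeled as 64 bytes, a common size on
 * 64-bit kernels; the real layout is config-dependent. */
struct page { unsigned char pad[64]; };

static struct page vmemmap_model[1 << 15];	/* flat stand-in for vmemmap */

static struct page *pfn_to_page(unsigned long pfn)
{
	return &vmemmap_model[pfn];	/* vmemmap is pfn-indexed */
}

int main(void)
{
	unsigned long pfn = 0, nr_pages = 1UL << 15;	/* one full section */
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	/* This [start, end) pair is what the patch hands to vmemmap_free(). */
	printf("memmap range: [%#lx, %#lx), %lu bytes\n",
	       start, end, end - start);
	return 0;
}
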
@@ -647,7 +650,8 @@ static void free_map_bootmem(struct page *memmap)
 	vmemmap_free(start, end, NULL);
 }
 #else
-static struct page *__kmalloc_section_memmap(void)
+struct page *populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
 	struct page *page, *ret;
 	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
@@ -668,15 +672,11 @@ static struct page *__kmalloc_section_memmap(void)
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 		struct vmem_altmap *altmap)
 {
-	return __kmalloc_section_memmap();
-}
+	struct page *memmap = pfn_to_page(pfn);
 
-static void __kfree_section_memmap(struct page *memmap,
-		struct vmem_altmap *altmap)
-{
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
 	else
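
In the !CONFIG_SPARSEMEM_VMEMMAP branch the memmap may have come from either the page allocator or vmalloc(), and the merged depopulate_section_memmap() dispatches on the address itself via is_vmalloc_addr(). A loose userspace analogue of that two-allocator free path; since userspace has no address-range test, a flag recorded at allocation time stands in for it (an assumption of the sketch):

#include <stdio.h>
#include <stdlib.h>

static int from_fallback;	/* stand-in for is_vmalloc_addr() */

static void *populate(size_t size)
{
	void *p = aligned_alloc(4096, size);	/* "page allocator" path */

	if (!p) {
		p = malloc(size);		/* "vmalloc" fallback */
		from_fallback = 1;
	}
	return p;
}

static void depopulate(void *memmap)
{
	/* Mirrors: if (is_vmalloc_addr(memmap)) vfree(memmap); else ... */
	if (from_fallback)
		printf("vfree-style free\n");
	else
		printf("page-allocator-style free\n");
	free(memmap);
}

int main(void)
{
	void *memmap = populate(1 << 20);

	if (memmap)
		depopulate(memmap);
	return 0;
}
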
@@ -745,12 +745,13 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
 	ret = 0;
-	memmap = kmalloc_section_memmap(section_nr, nid, altmap);
+	memmap = populate_section_memmap(start_pfn, PAGES_PER_SECTION, nid,
+			altmap);
 	if (!memmap)
 		return -ENOMEM;
 	usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
 	if (!usage) {
-		__kfree_section_memmap(memmap, altmap);
+		depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);
 		return -ENOMEM;
 	}
 
@@ -773,7 +774,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
 out:
 	if (ret < 0) {
 		kfree(usage);
-		__kfree_section_memmap(memmap, altmap);
+		depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);
 	}
 	return ret;
 }
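
Both error paths in sparse_add_one_section() now unwind with the same (start_pfn, PAGES_PER_SECTION, altmap) triple that created the mapping, keeping populate and depopulate symmetric. A standalone sketch of that pairing, with populate/depopulate as hypothetical stubs:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stubs for the section memmap helpers; a shared pointer
 * stands in for the kernel's ability to find the memmap from a pfn. */
static void *memmap_store;

static void *populate(unsigned long pfn, unsigned long nr_pages)
{
	printf("populate pfn %lu, %lu pages\n", pfn, nr_pages);
	memmap_store = malloc(nr_pages);	/* toy backing allocation */
	return memmap_store;
}

static void depopulate(unsigned long pfn, unsigned long nr_pages)
{
	printf("depopulate pfn %lu, %lu pages\n", pfn, nr_pages);
	free(memmap_store);
}

static int add_section(unsigned long pfn, unsigned long nr_pages)
{
	void *usage, *memmap = populate(pfn, nr_pages);

	if (!memmap)
		return -1;
	usage = calloc(1, 32);	/* stands in for mem_section_usage */
	if (!usage) {
		/* Error path unwinds with the exact range it populated. */
		depopulate(pfn, nr_pages);
		return -1;
	}
	/* Success: the kernel keeps both allocations for the live
	 * section; they are released here only to keep the demo clean. */
	free(usage);
	depopulate(pfn, nr_pages);
	return 0;
}

int main(void)
{
	return add_section(98304, 32768) ? EXIT_FAILURE : EXIT_SUCCESS;
}
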
@@ -809,7 +810,8 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 #endif
 
 static void free_section_usage(struct mem_section *ms, struct page *memmap,
-		struct mem_section_usage *usage, struct vmem_altmap *altmap)
+		struct mem_section_usage *usage, unsigned long pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	if (!usage)
 		return;
@@ -820,7 +822,7 @@ static void free_section_usage(struct mem_section *ms, struct page *memmap,
 	if (!early_section(ms)) {
 		kfree(usage);
 		if (memmap)
-			__kfree_section_memmap(memmap, altmap);
+			depopulate_section_memmap(pfn, nr_pages, altmap);
 		return;
 	}
 
@@ -849,6 +851,8 @@ void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
 
 	clear_hwpoisoned_pages(memmap + map_offset,
 			PAGES_PER_SECTION - map_offset);
-	free_section_usage(ms, memmap, usage, altmap);
+	free_section_usage(ms, memmap, usage,
+			section_nr_to_pfn(__section_nr(ms)),
+			PAGES_PER_SECTION, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
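
On removal the section's first pfn is recovered from the mem_section itself via section_nr_to_pfn(__section_nr(ms)), so free_section_usage() no longer has to infer the range from the memmap pointer. A small sketch of that round trip, modeling __section_nr() as an index into a flat section array (an assumption; the real lookup walks sparse section roots):

#include <stdio.h>

#define PFN_SECTION_SHIFT	15	/* assumption: x86-64 default */
#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)

struct mem_section { unsigned long section_mem_map; };

static struct mem_section mem_sections[64];	/* flat model, no roots */

static unsigned long __section_nr(struct mem_section *ms)
{
	return ms - mem_sections;	/* index recovers the section number */
}

static unsigned long section_nr_to_pfn(unsigned long pnum)
{
	return pnum << PFN_SECTION_SHIFT;
}

int main(void)
{
	struct mem_section *ms = &mem_sections[5];
	unsigned long pfn = section_nr_to_pfn(__section_nr(ms));

	/* These are the (pfn, nr_pages) arguments the removal path now
	 * threads down to depopulate_section_memmap(). */
	printf("remove: pfn %lu, %lu pages\n", pfn, PAGES_PER_SECTION);
	return 0;
}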