@@ -4,12 +4,14 @@
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
 
 #include <asm/e820/types.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -18,15 +20,142 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static int __init map_range(struct range *range)
+static __init void *early_alloc(size_t size, int nid)
+{
+	return memblock_virt_alloc_try_nid_nopanic(size, size,
+			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pte_t *pte;
+
+	if (pmd_none(*pmd)) {
+		void *p;
+
+		if (boot_cpu_has(X86_FEATURE_PSE) &&
+		    ((end - addr) == PMD_SIZE) &&
+		    IS_ALIGNED(addr, PMD_SIZE)) {
+			p = early_alloc(PMD_SIZE, nid);
+			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
+				return;
+			else if (p)
+				memblock_free(__pa(p), PMD_SIZE);
+		}
+
+		p = early_alloc(PAGE_SIZE, nid);
+		pmd_populate_kernel(&init_mm, pmd, p);
+	}
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t entry;
+		void *p;
+
+		if (!pte_none(*pte))
+			continue;
+
+		p = early_alloc(PAGE_SIZE, nid);
+		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+		set_pte_at(&init_mm, addr, pte, entry);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	if (pud_none(*pud)) {
+		void *p;
+
+		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
+		    ((end - addr) == PUD_SIZE) &&
+		    IS_ALIGNED(addr, PUD_SIZE)) {
+			p = early_alloc(PUD_SIZE, nid);
+			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
+				return;
+			else if (p)
+				memblock_free(__pa(p), PUD_SIZE);
+		}
+
+		p = early_alloc(PAGE_SIZE, nid);
+		pud_populate(&init_mm, pud, p);
+	}
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (!pmd_large(*pmd))
+			kasan_populate_pmd(pmd, addr, next, nid);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	if (p4d_none(*p4d)) {
+		void *p = early_alloc(PAGE_SIZE, nid);
+
+		p4d_populate(&init_mm, p4d, p);
+	}
+
+	pud = pud_offset(p4d, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (!pud_large(*pud))
+			kasan_populate_pud(pud, addr, next, nid);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	void *p;
+	p4d_t *p4d;
+	unsigned long next;
+
+	if (pgd_none(*pgd)) {
+		p = early_alloc(PAGE_SIZE, nid);
+		pgd_populate(&init_mm, pgd, p);
+	}
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		kasan_populate_p4d(p4d, addr, next, nid);
+	} while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
+					 int nid)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	addr = addr & PAGE_MASK;
+	end = round_up(end, PAGE_SIZE);
+	pgd = pgd_offset_k(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		kasan_populate_pgd(pgd, addr, next, nid);
+	} while (pgd++, addr = next, addr != end);
+}
+
+static void __init map_range(struct range *range)
 {
 	unsigned long start;
 	unsigned long end;
 
 	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
 	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
 
-	return vmemmap_populate(start, end, NUMA_NO_NODE);
+	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
 }
 
 static void __init clear_pgds(unsigned long start,
@@ -189,16 +318,16 @@ void __init kasan_init(void)
 		if (pfn_mapped[i].end == 0)
 			break;
 
-		if (map_range(&pfn_mapped[i]))
-			panic("kasan: unable to allocate shadow!");
+		map_range(&pfn_mapped[i]);
 	}
+
 	kasan_populate_zero_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
-	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
-			(unsigned long)kasan_mem_to_shadow(_end),
-			NUMA_NO_NODE);
+	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
+			      (unsigned long)kasan_mem_to_shadow(_end),
+			      early_pfn_to_nid(__pa(_stext)));
 
 	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);