@@ -11,6 +11,7 @@
  */
 
 #define pr_fmt(fmt) "kasan: " fmt
+#include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/sched/task.h>
@@ -35,77 +36,117 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
  * with the physical address from __pa_symbol.
  */
 
-static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
-                                        unsigned long end)
+static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 {
-        pte_t *pte;
-        unsigned long next;
+        void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+                                              __pa(MAX_DMA_ADDRESS),
+                                              MEMBLOCK_ALLOC_ACCESSIBLE, node);
+        return __pa(p);
+}
+
+static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
+                                      bool early)
+{
+        if (pmd_none(*pmd)) {
+                phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
+                                             : kasan_alloc_zeroed_page(node);
+                __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+        }
+
+        return early ? pte_offset_kimg(pmd, addr)
+                     : pte_offset_kernel(pmd, addr);
+}
 
-        if (pmd_none(*pmd))
-                __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
+static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
+                                      bool early)
+{
+        if (pud_none(*pud)) {
+                phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
+                                             : kasan_alloc_zeroed_page(node);
+                __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
+        }
+
+        return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
+}
+
+static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
+                                      bool early)
+{
+        if (pgd_none(*pgd)) {
+                phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
+                                             : kasan_alloc_zeroed_page(node);
+                __pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
+        }
+
+        return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
+}
+
+static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
+                                      unsigned long end, int node, bool early)
+{
+        unsigned long next;
+        pte_t *pte = kasan_pte_offset(pmd, addr, node, early);
 
-        pte = pte_offset_kimg(pmd, addr);
         do {
+                phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
+                                              : kasan_alloc_zeroed_page(node);
                 next = addr + PAGE_SIZE;
-                set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
-                                     PAGE_KERNEL));
+                set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
         } while (pte++, addr = next, addr != end && pte_none(*pte));
 }
 
-static void __init kasan_early_pmd_populate(pud_t *pud,
-                                        unsigned long addr,
-                                        unsigned long end)
+static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
+                                      unsigned long end, int node, bool early)
 {
-        pmd_t *pmd;
         unsigned long next;
+        pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);
 
-        if (pud_none(*pud))
-                __pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
-
-        pmd = pmd_offset_kimg(pud, addr);
         do {
                 next = pmd_addr_end(addr, end);
-                kasan_early_pte_populate(pmd, addr, next);
+                kasan_pte_populate(pmd, addr, next, node, early);
         } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
 }
 
-static void __init kasan_early_pud_populate(pgd_t *pgd,
-                                        unsigned long addr,
-                                        unsigned long end)
+static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
+                                      unsigned long end, int node, bool early)
 {
-        pud_t *pud;
         unsigned long next;
+        pud_t *pud = kasan_pud_offset(pgd, addr, node, early);
 
-        if (pgd_none(*pgd))
-                __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
-
-        pud = pud_offset_kimg(pgd, addr);
         do {
                 next = pud_addr_end(addr, end);
-                kasan_early_pmd_populate(pud, addr, next);
+                kasan_pmd_populate(pud, addr, next, node, early);
         } while (pud++, addr = next, addr != end && pud_none(*pud));
 }
 
-static void __init kasan_map_early_shadow(void)
+static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
+                                      int node, bool early)
 {
-        unsigned long addr = KASAN_SHADOW_START;
-        unsigned long end = KASAN_SHADOW_END;
         unsigned long next;
         pgd_t *pgd;
 
         pgd = pgd_offset_k(addr);
         do {
                 next = pgd_addr_end(addr, end);
-                kasan_early_pud_populate(pgd, addr, next);
+                kasan_pud_populate(pgd, addr, next, node, early);
         } while (pgd++, addr = next, addr != end);
 }
 
+/* The early shadow maps everything to a single page of zeroes */
 asmlinkage void __init kasan_early_init(void)
 {
         BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
         BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
         BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
-        kasan_map_early_shadow();
+        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
+                           true);
+}
+
+/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
+static void __init kasan_map_populate(unsigned long start, unsigned long end,
+                                      int node)
+{
+        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
 }
 
 /*
@@ -142,8 +183,8 @@ void __init kasan_init(void)
         struct memblock_region *reg;
         int i;
 
-        kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
-        kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
+        kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
+        kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
 
         mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
         mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
@@ -161,19 +202,8 @@ void __init kasan_init(void)
 
         clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
-        vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-                         pfn_to_nid(virt_to_pfn(lm_alias(_text))));
-
-        /*
-         * vmemmap_populate() has populated the shadow region that covers the
-         * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
-         * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
-         * kasan_populate_zero_shadow() from replacing the page table entries
-         * (PMD or PTE) at the edges of the shadow region for the kernel
-         * image.
-         */
-        kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
-        kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+        kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
+                           pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
         kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                                    (void *)mod_shadow_start);
@@ -191,9 +221,9 @@ void __init kasan_init(void)
                 if (start >= end)
                         break;
 
-                vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
-                                 (unsigned long)kasan_mem_to_shadow(end),
-                                 pfn_to_nid(virt_to_pfn(start)));
+                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+                                   (unsigned long)kasan_mem_to_shadow(end),
+                                   pfn_to_nid(virt_to_pfn(start)));
         }
 
         /*