
Commit e17d802

wildea01 authored and torvalds committed
arm64/mm/kasan: don't use vmemmap_populate() to initialize shadow
The kasan shadow is currently mapped using vmemmap_populate() since that
provides a semi-convenient way to map pages into init_top_pgt. However,
since that no longer zeroes the mapped pages, it is not suitable for
kasan, which requires zeroed shadow memory.

Add kasan_populate_shadow() interface and use it instead of
vmemmap_populate(). Besides, this allows us to take advantage of
gigantic pages and use them to populate the shadow, which should save
us some memory wasted on page tables and reduce TLB pressure.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
Signed-off-by: Pavel Tatashin <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Steven Sistare <[email protected]>
Cc: Daniel Jordan <[email protected]>
Cc: Bob Picco <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Sam Ravnborg <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent d17a1d9 commit e17d802

File tree: 2 files changed (+81, -51 lines)
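Before the per-file diffs, a rough userspace model of the pattern the patch introduces may help: kasan needs every shadow page backed by zeroed memory, so the early pass aliases the whole shadow region to one statically zeroed page, and the late pass gives each shadow page its own zeroed backing. This is only an illustrative sketch; shadow_populate(), alloc_zeroed_page(), NR_SHADOW, PAGE_SZ and the use of calloc() are stand-ins for the kernel's page-table walkers and memblock allocator, not kernel API.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ   4096
#define NR_SHADOW 8                 /* pretend the shadow region is 8 pages */

static unsigned char zero_page[PAGE_SZ];     /* like kasan_zero_page        */
static unsigned char *shadow_map[NR_SHADOW]; /* like the shadow page tables */

static unsigned char *alloc_zeroed_page(void)
{
        /* calloc() models the memblock allocator: memory comes back zeroed */
        return calloc(1, PAGE_SZ);
}

static void shadow_populate(int early)
{
        /* early: every entry aliases the shared zero page;
         * late:  every entry gets its own freshly zeroed page */
        for (int i = 0; i < NR_SHADOW; i++)
                shadow_map[i] = early ? zero_page : alloc_zeroed_page();
}

int main(void)
{
        shadow_populate(1);          /* models kasan_early_init()           */
        printf("early: every entry aliases %p\n", (void *)shadow_map[0]);

        shadow_populate(0);          /* models the late kasan_init() pass   */
        printf("late:  entry0=%p entry1=%p (distinct, zeroed)\n",
               (void *)shadow_map[0], (void *)shadow_map[1]);
        return 0;
}

In the kernel itself the late-phase pages come from memblock_virt_alloc_try_nid(), which returns zeroed memory, so the new path never depends on vmemmap_populate() zeroing anything.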

arch/arm64/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -85,7 +85,7 @@ config ARM64
         select HAVE_ARCH_BITREVERSE
         select HAVE_ARCH_HUGE_VMAP
         select HAVE_ARCH_JUMP_LABEL
-        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+        select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
         select HAVE_ARCH_KGDB
         select HAVE_ARCH_MMAP_RND_BITS
         select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
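Since the shadow is no longer populated through vmemmap_populate(), HAVE_ARCH_KASAN can drop its SPARSEMEM_VMEMMAP dependency; the exclusion of 16K pages combined with 48-bit VAs stays as before.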

arch/arm64/mm/kasan_init.c

Lines changed: 80 additions & 50 deletions
@@ -11,6 +11,7 @@
  */

 #define pr_fmt(fmt) "kasan: " fmt
+#include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/sched/task.h>
@@ -35,77 +36,117 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
  * with the physical address from __pa_symbol.
  */

-static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
-                                            unsigned long end)
+static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 {
-        pte_t *pte;
-        unsigned long next;
+        void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+                                              __pa(MAX_DMA_ADDRESS),
+                                              MEMBLOCK_ALLOC_ACCESSIBLE, node);
+        return __pa(p);
+}
+
+static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
+                                      bool early)
+{
+        if (pmd_none(*pmd)) {
+                phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
+                                             : kasan_alloc_zeroed_page(node);
+                __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+        }
+
+        return early ? pte_offset_kimg(pmd, addr)
+                     : pte_offset_kernel(pmd, addr);
+}

-        if (pmd_none(*pmd))
-                __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
+static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
+                                      bool early)
+{
+        if (pud_none(*pud)) {
+                phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
+                                             : kasan_alloc_zeroed_page(node);
+                __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
+        }
+
+        return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
+}
+
+static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
+                                      bool early)
+{
+        if (pgd_none(*pgd)) {
+                phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
+                                             : kasan_alloc_zeroed_page(node);
+                __pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
+        }
+
+        return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
+}
+
+static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
+                                      unsigned long end, int node, bool early)
+{
+        unsigned long next;
+        pte_t *pte = kasan_pte_offset(pmd, addr, node, early);

-        pte = pte_offset_kimg(pmd, addr);
         do {
+                phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
+                                              : kasan_alloc_zeroed_page(node);
                 next = addr + PAGE_SIZE;
-                set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
-                                     PAGE_KERNEL));
+                set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
         } while (pte++, addr = next, addr != end && pte_none(*pte));
 }

-static void __init kasan_early_pmd_populate(pud_t *pud,
-                                            unsigned long addr,
-                                            unsigned long end)
+static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
+                                      unsigned long end, int node, bool early)
 {
-        pmd_t *pmd;
         unsigned long next;
+        pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);

-        if (pud_none(*pud))
-                __pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
-
-        pmd = pmd_offset_kimg(pud, addr);
         do {
                 next = pmd_addr_end(addr, end);
-                kasan_early_pte_populate(pmd, addr, next);
+                kasan_pte_populate(pmd, addr, next, node, early);
         } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
 }

-static void __init kasan_early_pud_populate(pgd_t *pgd,
-                                            unsigned long addr,
-                                            unsigned long end)
+static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
+                                      unsigned long end, int node, bool early)
 {
-        pud_t *pud;
         unsigned long next;
+        pud_t *pud = kasan_pud_offset(pgd, addr, node, early);

-        if (pgd_none(*pgd))
-                __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
-
-        pud = pud_offset_kimg(pgd, addr);
         do {
                 next = pud_addr_end(addr, end);
-                kasan_early_pmd_populate(pud, addr, next);
+                kasan_pmd_populate(pud, addr, next, node, early);
         } while (pud++, addr = next, addr != end && pud_none(*pud));
 }

-static void __init kasan_map_early_shadow(void)
+static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
+                                      int node, bool early)
 {
-        unsigned long addr = KASAN_SHADOW_START;
-        unsigned long end = KASAN_SHADOW_END;
         unsigned long next;
         pgd_t *pgd;

         pgd = pgd_offset_k(addr);
         do {
                 next = pgd_addr_end(addr, end);
-                kasan_early_pud_populate(pgd, addr, next);
+                kasan_pud_populate(pgd, addr, next, node, early);
         } while (pgd++, addr = next, addr != end);
 }

+/* The early shadow maps everything to a single page of zeroes */
 asmlinkage void __init kasan_early_init(void)
 {
         BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
         BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
         BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
-        kasan_map_early_shadow();
+        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
+                           true);
+}
+
+/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
+static void __init kasan_map_populate(unsigned long start, unsigned long end,
+                                      int node)
+{
+        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
 }

 /*
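Worth noting in the hunk above: a single walker now serves both phases. With early == true, the kasan_pte/pmd/pud_offset() helpers install the statically allocated kasan_zero_pte/pmd/pud tables via __pa_symbol() and point every PTE at kasan_zero_page, while with early == false they pull freshly zeroed pages from memblock for the node passed down by the caller.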
@@ -142,8 +183,8 @@ void __init kasan_init(void)
         struct memblock_region *reg;
         int i;

-        kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
-        kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
+        kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
+        kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

         mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
         mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
@@ -161,19 +202,8 @@

         clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

-        vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-                         pfn_to_nid(virt_to_pfn(lm_alias(_text))));
-
-        /*
-         * vmemmap_populate() has populated the shadow region that covers the
-         * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
-         * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
-         * kasan_populate_zero_shadow() from replacing the page table entries
-         * (PMD or PTE) at the edges of the shadow region for the kernel
-         * image.
-         */
-        kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
-        kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+        kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
+                           pfn_to_nid(virt_to_pfn(lm_alias(_text))));

         kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                                    (void *)mod_shadow_start);
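Because kasan_map_populate() page-aligns its range and maps at page granularity, the SWAPPER_BLOCK_SIZE round_down()/round_up() fixup deleted above (only needed while vmemmap_populate() created block mappings) becomes unnecessary; the alignment now happens once, where kimg_shadow_start/end are computed in the previous hunk.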
@@ -191,9 +221,9 @@ void __init kasan_init(void)
                 if (start >= end)
                         break;

-                vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
-                                 (unsigned long)kasan_mem_to_shadow(end),
-                                 pfn_to_nid(virt_to_pfn(start)));
+                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+                                   (unsigned long)kasan_mem_to_shadow(end),
+                                   pfn_to_nid(virt_to_pfn(start)));
         }

         /*
