Skip to content

Commit da14170

Browse files
labbott authored and ctmarinas committed
arm64: add better page protections to arm64
Add page protections for arm64 similar to those in arm. This is
for security reasons to prevent certain classes of exploits.
The current method:

- Map all memory as either RWX or RW. We round to the nearest
  section to avoid creating page tables before everything is mapped
- Once everything is mapped, if either end of the RWX section should
  not be X, we split the PMD and remap as necessary
- When initmem is to be freed, we change the permissions back to
  RW (using stop machine if necessary to flush the TLB)
- If CONFIG_DEBUG_RODATA is set, the read only sections are set
  read only.

Acked-by: Ard Biesheuvel <[email protected]>
Tested-by: Kees Cook <[email protected]>
Tested-by: Ard Biesheuvel <[email protected]>
Signed-off-by: Laura Abbott <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
1 parent 2f896d5 commit da14170

File tree

6 files changed

+233
-26
lines changed

6 files changed

+233
-26
lines changed

arch/arm64/Kconfig.debug

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,4 +66,27 @@ config DEBUG_SET_MODULE_RONX
6666
against certain classes of kernel exploits.
6767
If in doubt, say "N".
6868

69+
config DEBUG_RODATA
70+
bool "Make kernel text and rodata read-only"
71+
help
72+
If this is set, kernel text and rodata will be made read-only. This
73+
is to help catch accidental or malicious attempts to change the
74+
kernel's executable code. Additionally splits rodata from kernel
75+
text so it can be made explicitly non-executable.
76+
77+
If in doubt, say Y
78+
79+
config DEBUG_ALIGN_RODATA
80+
depends on DEBUG_RODATA && !ARM64_64K_PAGES
81+
bool "Align linker sections up to SECTION_SIZE"
82+
help
83+
If this option is enabled, sections that may potentially be marked as
84+
read only or non-executable will be aligned up to the section size of
85+
the kernel. This prevents sections from being split into pages and
86+
avoids a potential TLB penalty. The downside is an increase in
87+
alignment and potentially wasted space. Turn on this option if
88+
performance is more important than memory pressure.
89+
90+
If in doubt, say N
91+
6992
endmenu

arch/arm64/include/asm/cacheflush.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -152,4 +152,9 @@ int set_memory_ro(unsigned long addr, int numpages);
152152
int set_memory_rw(unsigned long addr, int numpages);
153153
int set_memory_x(unsigned long addr, int numpages);
154154
int set_memory_nx(unsigned long addr, int numpages);
155+
156+
#ifdef CONFIG_DEBUG_RODATA
157+
void mark_rodata_ro(void);
158+
#endif
159+
155160
#endif

arch/arm64/kernel/vmlinux.lds.S

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
#include <asm/thread_info.h>
99
#include <asm/memory.h>
1010
#include <asm/page.h>
11+
#include <asm/pgtable.h>
1112

1213
#include "image.h"
1314

@@ -49,6 +50,14 @@ PECOFF_FILE_ALIGNMENT = 0x200;
4950
#define PECOFF_EDATA_PADDING
5051
#endif
5152

53+
#ifdef CONFIG_DEBUG_ALIGN_RODATA
54+
#define ALIGN_DEBUG_RO . = ALIGN(1<<SECTION_SHIFT);
55+
#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO
56+
#else
57+
#define ALIGN_DEBUG_RO
58+
#define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min);
59+
#endif
60+
5261
SECTIONS
5362
{
5463
/*
@@ -71,6 +80,7 @@ SECTIONS
7180
_text = .;
7281
HEAD_TEXT
7382
}
83+
ALIGN_DEBUG_RO
7484
.text : { /* Real text segment */
7585
_stext = .; /* Text and read-only data */
7686
__exception_text_start = .;
@@ -87,19 +97,22 @@ SECTIONS
8797
*(.got) /* Global offset table */
8898
}
8999

100+
ALIGN_DEBUG_RO
90101
RO_DATA(PAGE_SIZE)
91102
EXCEPTION_TABLE(8)
92103
NOTES
104+
ALIGN_DEBUG_RO
93105
_etext = .; /* End of text and rodata section */
94106

95-
. = ALIGN(PAGE_SIZE);
107+
ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
96108
__init_begin = .;
97109

98110
INIT_TEXT_SECTION(8)
99111
.exit.text : {
100112
ARM_EXIT_KEEP(EXIT_TEXT)
101113
}
102-
. = ALIGN(16);
114+
115+
ALIGN_DEBUG_RO_MIN(16)
103116
.init.data : {
104117
INIT_DATA
105118
INIT_SETUP(16)

arch/arm64/mm/init.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -344,6 +344,7 @@ void __init mem_init(void)
344344

345345
void free_initmem(void)
346346
{
347+
fixup_init();
347348
free_initmem_default(0);
348349
free_alternatives_memory();
349350
}

arch/arm64/mm/mm.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1 +1,3 @@
11
extern void __init bootmem_init(void);
2+
3+
void fixup_init(void);

0 commit comments

Comments (0)