Commit 74e0817

Authored by Suresh Siddha, committed by H. Peter Anvin
x86-64: align RODATA kernel section to 2MB with CONFIG_DEBUG_RODATA
CONFIG_DEBUG_RODATA chops the large pages spanning the boundaries of kernel text/rodata/data into small 4KB pages, as they are mapped with different attributes (text as RO, RODATA as RO and NX, etc.).

On x86-64, preserve the large page mappings for the kernel text/rodata/data boundaries when CONFIG_DEBUG_RODATA is enabled. This is done by allowing the RODATA section to be hugepage aligned and by giving the 2MB page boundaries the same RWX attributes.

The extra memory pages padding the sections are freed at the end of boot, and the kernel identity mappings then have different RWX permissions than the kernel text mappings. The kernel identity mappings to these physical pages use smaller pages, but the large page mappings are still retained for the kernel text/rodata/data mappings.

Signed-off-by: Suresh Siddha <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
1 parent b9af7c0 commit 74e0817

File tree

4 files changed: +50 −1 lines changed
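
To make the mechanism in the commit message concrete, here is a small user-space sketch (not part of the commit; the end-of-text address is hypothetical) of the ALIGN(HPAGE_SIZE) arithmetic the linker script performs and of the padding pages it creates:

/* Illustrative sketch only: how aligning .rodata to a 2MB boundary
 * creates padding pages between the end of text and the start of
 * rodata.  The address below is made up; HPAGE_SIZE is 2MB on x86-64. */
#include <stdio.h>

#define HPAGE_SIZE	(2UL << 20)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* linker-style ALIGN() */

int main(void)
{
	/* hypothetical page-aligned end of kernel text/exception tables */
	unsigned long text_end     = 0xffffffff81523000UL;
	/* .rodata starts at the next 2MB boundary */
	unsigned long rodata_start = ALIGN_UP(text_end, HPAGE_SIZE);

	/* these pages hold no kernel data; mark_rodata_ro() later hands
	 * them back to the allocator via free_init_pages() */
	printf("padding between text and rodata: %lu KB\n",
	       (rodata_start - text_end) >> 10);
	return 0;
}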

arch/x86/include/asm/sections.h

Lines changed: 6 additions & 0 deletions
@@ -2,7 +2,13 @@
 #define _ASM_X86_SECTIONS_H
 
 #include <asm-generic/sections.h>
+#include <asm/uaccess.h>
 
 extern char __brk_base[], __brk_limit[];
+extern struct exception_table_entry __stop___ex_table[];
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+extern char __end_rodata_hpage_align[];
+#endif
 
 #endif	/* _ASM_X86_SECTIONS_H */

arch/x86/kernel/vmlinux.lds.S

Lines changed: 17 additions & 0 deletions
@@ -41,6 +41,21 @@ ENTRY(phys_startup_64)
 jiffies_64 = jiffies;
 #endif
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+
+#define X64_ALIGN_DEBUG_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);
+
+#define X64_ALIGN_DEBUG_RODATA_END		\
+		. = ALIGN(HPAGE_SIZE);		\
+		__end_rodata_hpage_align = .;
+
+#else
+
+#define X64_ALIGN_DEBUG_RODATA_BEGIN
+#define X64_ALIGN_DEBUG_RODATA_END
+
+#endif
+
 PHDRS {
 	text PT_LOAD FLAGS(5);	/* R_E */
 	data PT_LOAD FLAGS(7);	/* RWE */
@@ -90,7 +105,9 @@ SECTIONS
 
 	EXCEPTION_TABLE(16) :text = 0x9090
 
+	X64_ALIGN_DEBUG_RODATA_BEGIN
 	RO_DATA(PAGE_SIZE)
+	X64_ALIGN_DEBUG_RODATA_END
 
 	/* Data */
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
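
With CONFIG_DEBUG_RODATA enabled on x86-64, substituting the two macros by hand shows what the linker sees around RO_DATA; a sketch of the expansion, not verbatim preprocessor output:

	. = ALIGN(HPAGE_SIZE);		/* X64_ALIGN_DEBUG_RODATA_BEGIN: pad
					   up to the next 2MB boundary */
	RO_DATA(PAGE_SIZE)		/* .rodata and friends */
	. = ALIGN(HPAGE_SIZE);		/* X64_ALIGN_DEBUG_RODATA_END: pad
					   past the end of .rodata ... */
	__end_rodata_hpage_align = .;	/* ... and export the 2MB-aligned end
					   symbol declared in asm/sections.h */

Both edges of the read-only region therefore fall on 2MB boundaries, so the large pages covering it can carry one uniform protection and never need to be split.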

arch/x86/mm/init_64.c

Lines changed: 13 additions & 1 deletion
@@ -727,9 +727,13 @@ void set_kernel_text_ro(void)
 
 void mark_rodata_ro(void)
 {
-	unsigned long start = PFN_ALIGN(_text), end = PFN_ALIGN(__end_rodata);
+	unsigned long start = PFN_ALIGN(_text);
 	unsigned long rodata_start =
 		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+	unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
+	unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
+	unsigned long data_start = (unsigned long) &_sdata;
 
 	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
 	       (end - start) >> 10);
@@ -752,6 +756,14 @@ void mark_rodata_ro(void)
 	printk(KERN_INFO "Testing CPA: again\n");
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
+
+	free_init_pages("unused kernel memory",
+			(unsigned long) page_address(virt_to_page(text_end)),
+			(unsigned long)
+				page_address(virt_to_page(rodata_start)));
+	free_init_pages("unused kernel memory",
+			(unsigned long) page_address(virt_to_page(rodata_end)),
+			(unsigned long) page_address(virt_to_page(data_start)));
 }
 
 #endif
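
A subtlety in the two free_init_pages() calls above: text_end, rodata_start, rodata_end and data_start are addresses in the kernel text mapping, but the padding pages are freed through the kernel identity (direct) mapping, whose permissions stay writable after the text range goes read-only. A rough sketch of what the page_address(virt_to_page(...)) round-trip does (kernel-style pseudocode, not from the commit):

/* Sketch: map a kernel-text virtual address to the address of the
 * same physical page in the identity mapping.  virt_to_page() finds
 * the backing struct page; page_address() returns where that page is
 * mapped in the direct map, so the freed pages can be poisoned and
 * released without touching the now read-only text mapping. */
static unsigned long text_to_direct_map(unsigned long addr)
{
	return (unsigned long) page_address(virt_to_page(addr));
}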

arch/x86/mm/pageattr.c

Lines changed: 14 additions & 0 deletions
@@ -279,6 +279,20 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+	/*
+	 * Kernel text mappings for the large page aligned .rodata section
+	 * will be read-only. For the kernel identity mappings covering
+	 * the holes caused by this alignment can be anything.
+	 *
+	 * This will preserve the large page mappings for kernel text/data
+	 * at no extra cost.
+	 */
+	if (within(address, (unsigned long)_text,
+		   (unsigned long)__end_rodata_hpage_align))
+		pgprot_val(forbidden) |= _PAGE_RW;
+#endif
+
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
 
 	return prot;
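
within() is the same small range-check helper that the earlier rules in static_protections() use; it is defined near the top of pageattr.c, roughly as the following half-open interval test:

/* Rough shape of the existing helper: true iff start <= addr < end */
static inline int within(unsigned long addr, unsigned long start,
			 unsigned long end)
{
	return addr >= start && addr < end;
}

So every 4KB address from _text up to (but not including) __end_rodata_hpage_align is forced non-writable in the kernel text mapping, giving each 2MB page in that range one uniform protection and letting the large mappings survive.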
