Skip to content

Commit b741596

Browse files
committed
Merge tag 'riscv-for-linus-5.13-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Palmer Dabbelt:

- A fix to avoid over-allocating the kernel's mapping on !MMU systems, which could lead to up to 2 MiB of lost memory

- The SiFive address-extension errata only manifest on rv64; they are now disabled on rv32, where they are unnecessary

- A pair of late-landing cleanups

* tag 'riscv-for-linus-5.13-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: remove unused handle_exception symbol
  riscv: Consistify protect_kernel_linear_mapping_text_rodata() use
  riscv: enable SiFive errata CIP-453 and CIP-1200 Kconfig only if CONFIG_64BIT=y
  riscv: Only extend kernel reservation if mapped read-only
2 parents fec4d42 + beaf5ae commit b741596

File tree

5 files changed

+16
-10
lines changed

5 files changed

+16
-10
lines changed

arch/riscv/Kconfig.erratas

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ config ERRATA_SIFIVE
2121

2222
config ERRATA_SIFIVE_CIP_453
2323
bool "Apply SiFive errata CIP-453"
24-
depends on ERRATA_SIFIVE
24+
depends on ERRATA_SIFIVE && 64BIT
2525
default y
2626
help
2727
This will apply the SiFive CIP-453 errata to add sign extension
@@ -32,7 +32,7 @@ config ERRATA_SIFIVE_CIP_453
3232

3333
config ERRATA_SIFIVE_CIP_1200
3434
bool "Apply SiFive errata CIP-1200"
35-
depends on ERRATA_SIFIVE
35+
depends on ERRATA_SIFIVE && 64BIT
3636
default y
3737
help
3838
This will apply the SiFive CIP-1200 errata to replace all

arch/riscv/include/asm/set_memory.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@ int set_memory_x(unsigned long addr, int numpages);
1717
int set_memory_nx(unsigned long addr, int numpages);
1818
int set_memory_rw_nx(unsigned long addr, int numpages);
1919
void protect_kernel_text_data(void);
20-
void protect_kernel_linear_mapping_text_rodata(void);
2120
#else
2221
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
2322
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
@@ -27,6 +26,12 @@ static inline void protect_kernel_text_data(void) {}
2726
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
2827
#endif
2928

29+
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
30+
void protect_kernel_linear_mapping_text_rodata(void);
31+
#else
32+
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
33+
#endif
34+
3035
int set_direct_map_invalid_noflush(struct page *page);
3136
int set_direct_map_default_noflush(struct page *page);
3237
bool kernel_page_present(struct page *page);

arch/riscv/kernel/setup.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -293,9 +293,7 @@ void __init setup_arch(char **cmdline_p)
293293

294294
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
295295
protect_kernel_text_data();
296-
#if defined(CONFIG_64BIT) && defined(CONFIG_MMU) && !defined(CONFIG_XIP_KERNEL)
297296
protect_kernel_linear_mapping_text_rodata();
298-
#endif
299297
}
300298

301299
#ifdef CONFIG_SWIOTLB

arch/riscv/kernel/traps.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,6 @@
2525

2626
int show_unhandled_signals = 1;
2727

28-
extern asmlinkage void handle_exception(void);
29-
3028
static DEFINE_SPINLOCK(die_lock);
3129

3230
void die(struct pt_regs *regs, const char *str)

arch/riscv/mm/init.c

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -135,11 +135,16 @@ void __init setup_bootmem(void)
135135

136136
/*
137137
* Reserve from the start of the kernel to the end of the kernel
138-
* and make sure we align the reservation on PMD_SIZE since we will
138+
*/
139+
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
140+
/*
141+
* Make sure we align the reservation on PMD_SIZE since we will
139142
* map the kernel in the linear mapping as read-only: we do not want
140143
* any allocation to happen between _end and the next pmd aligned page.
141144
*/
142-
memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK);
145+
vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
146+
#endif
147+
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
143148

144149
/*
145150
* memblock allocator is not aware of the fact that last 4K bytes of
@@ -640,7 +645,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
640645
#endif
641646
}
642647

643-
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
648+
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
644649
void protect_kernel_linear_mapping_text_rodata(void)
645650
{
646651
unsigned long text_start = (unsigned long)lm_alias(_start);

0 commit comments

Comments
 (0)