
Commit 7dd01ae

Andre-ARM authored and ctmarinas committed
arm64: trap userspace "dc cvau" cache operation on errata-affected core
The ARM errata 819472, 826319, 827319 and 824069 for affected
Cortex-A53 cores require promoting "dc cvau" instructions to
"dc civac". Since we allow userspace to also emit those instructions,
we should make sure that "dc cvau" gets promoted there too.

So let's grasp the nettle here and actually trap every userland cache
maintenance instruction once we detect at least one affected core in
the system. We then emulate the instruction by executing it on behalf
of userland, promoting "dc cvau" to "dc civac" on the way and
injecting an access fault back into userspace if the emulated access
faults.

Signed-off-by: Andre Przywara <[email protected]>
[[email protected]: s/set_segfault/arm64_notify_segfault/]
Signed-off-by: Catalin Marinas <[email protected]>
1 parent 390bf17 commit 7dd01ae
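
For illustration (not part of the commit): a userspace routine that
cleans a cache line with "dc cvau", e.g. after generating code at
runtime, would now trap on an affected system and be transparently
re-executed by the kernel as "dc civac". The snippet below is a
hypothetical example of such a caller; the function names are made up
for this sketch.

/*
 * Hypothetical userspace snippet (illustration only): clean a data
 * cache line to the point of unification after writing instructions
 * to a buffer. On an errata-affected Cortex-A53 this "dc cvau" now
 * traps to EL1, where the kernel executes "dc civac" on our behalf
 * and advances the PC past the trapped instruction.
 */
static inline void clean_dcache_line(void *addr)
{
	asm volatile("dc cvau, %0" : : "r" (addr) : "memory");
}

static inline void invalidate_icache_line(void *addr)
{
	/* "ic ivau" is trapped and emulated the same way (CRm == 5) */
	asm volatile("ic ivau, %0" : : "r" (addr) : "memory");
}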

File tree

5 files changed (+75, -2 lines)


arch/arm64/include/asm/processor.h

Lines changed: 1 addition & 0 deletions

@@ -192,5 +192,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 void cpu_enable_pan(void *__unused);
 void cpu_enable_uao(void *__unused);
+void cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */

arch/arm64/include/asm/sysreg.h

Lines changed: 1 addition & 1 deletion

@@ -98,11 +98,11 @@
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
 /* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_UCI		(1 << 26)
 #define SCTLR_EL1_SPAN		(1 << 23)
 #define SCTLR_EL1_SED		(1 << 8)
 #define SCTLR_EL1_CP15BEN	(1 << 5)
 
-
 /* id_aa64isar0 */
 #define ID_AA64ISAR0_RDM_SHIFT	28
 #define ID_AA64ISAR0_ATOMICS_SHIFT	20
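
The new SCTLR_EL1_UCI define names bit 26 of SCTLR_EL1: when UCI is
clear, EL0 execution of DC CVAU, DC CVAC, DC CIVAC and IC IVAU traps
to EL1 instead of running natively. cpu_enable_cache_maint_trap()
clears that bit via config_sctlr_el1(), which in this kernel era is a
simple read-modify-write helper, roughly as sketched below (shown for
reference, not part of this commit; assumes u32 from linux/types.h):

/*
 * Sketch of config_sctlr_el1(): clear the requested bits, set the
 * requested bits, and write SCTLR_EL1 back.
 */
static inline void config_sctlr_el1(u32 clear, u32 set)
{
	u32 val;

	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
	val &= ~clear;
	val |= set;
	asm volatile("msr sctlr_el1, %0" : : "r" (val));
}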

arch/arm64/kernel/cpu_errata.c

Lines changed: 2 additions & 0 deletions

@@ -46,6 +46,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.desc = "ARM errata 826319, 827319, 824069",
 		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
 		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+		.enable = cpu_enable_cache_maint_trap,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_819472
@@ -54,6 +55,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.desc = "ARM errata 819472",
 		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
 		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
+		.enable = cpu_enable_cache_maint_trap,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_832075
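
The .enable hook added to both errata entries is invoked by the
cpufeature/errata framework on every online CPU once the capability is
detected, so all cores start trapping EL0 cache maintenance, not only
the affected ones. A hedged sketch of that dispatch, modelled on
enable_cpu_capabilities() in arch/arm64/kernel/cpufeature.c of this
era (details may differ; not part of this commit):

/*
 * Each detected capability's .enable hook runs on every online CPU
 * via an IPI, so the SCTLR_EL1.UCI change is applied system-wide.
 */
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (caps->enable && cpus_have_cap(caps->capability))
			on_each_cpu(caps->enable, NULL, true);
}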

arch/arm64/kernel/entry.S

Lines changed: 11 additions & 1 deletion

@@ -451,7 +451,7 @@ el0_sync:
 	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
-	b.eq	el0_undef
+	b.eq	el0_sys
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
 	b.eq	el0_sp_pc
 	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
@@ -579,6 +579,16 @@ el0_undef:
 	mov	x0, sp
 	bl	do_undefinstr
 	b	ret_to_user
+el0_sys:
+	/*
+	 * System instructions, for trapped cache maintenance instructions
+	 */
+	enable_dbg_and_irq
+	ct_user_exit
+	mov	x0, x25
+	mov	x1, sp
+	bl	do_sysinstr
+	b	ret_to_user
 el0_dbg:
 	/*
 	 * Debug exception handling

arch/arm64/kernel/traps.c

Lines changed: 60 additions & 0 deletions

@@ -41,6 +41,7 @@
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
 #include <asm/system_misc.h>
+#include <asm/sysreg.h>
 
 static const char *handler[]= {
 	"Synchronous Abort",
@@ -427,6 +428,65 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
 }
 
+void cpu_enable_cache_maint_trap(void *__unused)
+{
+	config_sctlr_el1(SCTLR_EL1_UCI, 0);
+}
+
+#define __user_cache_maint(insn, address, res)			\
+	asm volatile (						\
+		"1:	" insn ", %1\n"				\
+		"	mov	%w0, #0\n"			\
+		"2:\n"						\
+		"	.pushsection .fixup,\"ax\"\n"		\
+		"	.align	2\n"				\
+		"3:	mov	%w0, %w2\n"			\
+		"	b	2b\n"				\
+		"	.popsection\n"				\
+		_ASM_EXTABLE(1b, 3b)				\
+		: "=r" (res)					\
+		: "r" (address), "i" (-EFAULT) )
+
+asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
+{
+	unsigned long address;
+	int ret;
+
+	/* if this is a write with: Op0=1, Op2=1, Op1=3, CRn=7 */
+	if ((esr & 0x01fffc01) == 0x0012dc00) {
+		int rt = (esr >> 5) & 0x1f;
+		int crm = (esr >> 1) & 0x0f;
+
+		address = (rt == 31) ? 0 : regs->regs[rt];
+
+		switch (crm) {
+		case 11:		/* DC CVAU, gets promoted */
+			__user_cache_maint("dc civac", address, ret);
+			break;
+		case 10:		/* DC CVAC, gets promoted */
+			__user_cache_maint("dc civac", address, ret);
+			break;
+		case 14:		/* DC CIVAC */
+			__user_cache_maint("dc civac", address, ret);
+			break;
+		case 5:			/* IC IVAU */
+			__user_cache_maint("ic ivau", address, ret);
+			break;
+		default:
+			force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+			return;
+		}
+	} else {
+		force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+		return;
+	}
+
+	if (ret)
+		arm64_notify_segfault(regs, address);
+	else
+		regs->pc += 4;
+}
+
 long compat_arm_syscall(struct pt_regs *regs);
 
 asmlinkage long do_ni_syscall(struct pt_regs *regs)
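
The magic constants in do_sysinstr() come from the ESR_ELx ISS
encoding for trapped SYS instructions: Op0 sits in bits [21:20], Op2
in [19:17], Op1 in [16:14], CRn in [13:10], Rt in [9:5], CRm in [4:1],
and bit 0 is the read/write direction. Masking with 0x01fffc01 and
comparing against 0x0012dc00 therefore matches exactly Op0=1, Op1=3,
Op2=1, CRn=7 writes, i.e. the user-visible cache maintenance
operations, leaving CRm to select the concrete instruction. A
standalone sketch of the decode, using a hypothetical ESR value for a
trapped "dc cvau, x0" (Rt=0, CRm=11):

#include <stdio.h>

int main(void)
{
	/*
	 * Hypothetical ISS for a trapped "dc cvau, x0":
	 * Op0=1, Op2=1, Op1=3, CRn=7, Rt=0, CRm=11, direction=write.
	 */
	unsigned int esr = 0x0012dc00 | (0 << 5) | (11 << 1);

	if ((esr & 0x01fffc01) == 0x0012dc00) {
		int rt  = (esr >> 5) & 0x1f;	/* address register: x0 */
		int crm = (esr >> 1) & 0x0f;	/* 11 selects DC CVAU   */

		printf("rt=x%d crm=%d\n", rt, crm);	/* rt=x0 crm=11 */
	}

	return 0;
}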
