
Commit eeabac7

sparc64: Validate kernel generated fault addresses on sparc64.
In order to handle all of the cases of address calculation overflow properly, we run sparc 32-bit processes in "address masking" mode when running on a 64-bit kernel. Address masking mode zeros out the top 32 bits of the address calculated for every load and store instruction.

However, when we're in privileged mode we have to run with address masking disabled, even when accessing userspace from the kernel. To "simulate" the address masking mode, we clear the top bits by hand for 32-bit processes in the fault handler.

It is the responsibility of code in the compat layer to properly zero extend addresses used to access userspace. If this isn't followed properly, we can get into a fault loop.

Say that the user address is 0xf0000000, but for whatever reason the kernel code sign extends this to 64 bits, and then the kernel tries to access the result. In such a case we'll fault on address 0xfffffffff0000000, but the fault handler will process that fault as if it were to address 0xf0000000. We'll loop faulting forever, because the fault never gets satisfied.

So add a check specifically for this case: the kernel is faulting on a userspace address access and the two addresses don't match up. This code path is slow enough, and this bug is painful enough to diagnose, that this kind of bug check is warranted.

Signed-off-by: David S. Miller <[email protected]>
1 parent 802c64b commit eeabac7
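
The extension mismatch described in the message is easy to see in isolation. Below is a small user-space C sketch (illustration only, not code from this commit; the variable names are made up, and two's-complement behavior of the signed cast is assumed) showing how a sign-extended 32-bit address stops matching the masked address the fault handler works with, while a zero-extended one does not:

/*
 * Illustration of the extension bug described above (user-space demo,
 * not kernel code).  A 32-bit user address pushed through a signed
 * 32-bit type gets sign extended; the fault handler then masks the
 * faulting address back to 32 bits, so the two never agree and the
 * fault can never be satisfied.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t uaddr32 = 0xf0000000u;		/* 32-bit user address */

	/* Wrong: sign extension via a signed 32-bit intermediate. */
	uint64_t sign_ext = (uint64_t)(int32_t)uaddr32;

	/* Right: plain zero extension, what compat code must do. */
	uint64_t zero_ext = (uint64_t)uaddr32;

	/* What the fault handler does for TIF_32BIT tasks. */
	uint64_t masked = sign_ext & 0xffffffffull;

	printf("sign extended (faulting) address: %#llx\n",
	       (unsigned long long)sign_ext);	/* 0xfffffffff0000000 */
	printf("address the handler acts on:     %#llx\n",
	       (unsigned long long)masked);	/* 0xf0000000 */
	printf("zero extended address:           %#llx\n",
	       (unsigned long long)zero_ext);	/* 0xf0000000 */
	return 0;
}

The new WARN_ON_ONCE() check in the diff below catches exactly this situation: the address the CPU faulted on and the effective address recomputed from the faulting instruction land on different pages.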

File tree

1 file changed: +18 -7 lines changed

arch/sparc/mm/fault_64.c

Lines changed: 18 additions & 7 deletions
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/percpu.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -244,8 +245,14 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	    (fault_code & FAULT_CODE_DTLB))
 		BUG();
 
+	if (test_thread_flag(TIF_32BIT)) {
+		if (!(regs->tstate & TSTATE_PRIV))
+			regs->tpc &= 0xffffffff;
+		address &= 0xffffffff;
+	}
+
 	if (regs->tstate & TSTATE_PRIV) {
-		unsigned long tpc = regs->tpc;
+		unsigned long eaddr, tpc = regs->tpc;
 
 		/* Sanity check the PC. */
 		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
@@ -255,6 +262,16 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			bad_kernel_pc(regs, address);
 			return;
 		}
+
+		insn = get_fault_insn(regs, insn);
+		eaddr = compute_effective_address(regs, insn, 0);
+		if (WARN_ON_ONCE((eaddr & PAGE_MASK) != (address & PAGE_MASK))) {
+			printk(KERN_ERR "FAULT: Mismatch kernel fault "
+			       "address: addr[%lx] eaddr[%lx] TPC[%lx]\n",
+			       address, eaddr, tpc);
+			show_regs(regs);
+			goto handle_kernel_fault;
+		}
 	}
 
 	/*
@@ -264,12 +281,6 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	if (in_atomic() || !mm)
 		goto intr_or_no_mm;
 
-	if (test_thread_flag(TIF_32BIT)) {
-		if (!(regs->tstate & TSTATE_PRIV))
-			regs->tpc &= 0xffffffff;
-		address &= 0xffffffff;
-	}
-
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		if ((regs->tstate & TSTATE_PRIV) &&
 		    !search_exception_tables(regs->tpc)) {
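
On the "compat layer must zero extend" point: a hypothetical compat syscall sketch (not part of this commit; compat_sys_example() and its argument names are invented) of the conversion that avoids the problem, using the kernel's compat_ptr() helper, which zero extends a compat_uptr_t:

/*
 * Hypothetical compat-layer sketch, illustration only.  compat_ptr()
 * zero extends the 32-bit user pointer, so 0xf0000000 stays
 * 0xf0000000; a cast through a signed 32-bit type could sign extend
 * it to 0xfffffffff0000000 and trigger the fault loop described in
 * the commit message.
 */
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

asmlinkage long compat_sys_example(compat_uptr_t ubuf, compat_size_t len)
{
	char __user *buf = compat_ptr(ubuf);	/* zero extension */
	char first;

	if (!len)
		return -EINVAL;

	if (copy_from_user(&first, buf, 1))	/* faults on a sane address */
		return -EFAULT;

	return 0;
}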
