
Commit 2641f08

dwmw2 authored and KAGA-KOKO committed
x86/retpoline/entry: Convert entry assembler indirect jumps
Convert indirect jumps in core 32/64bit entry assembler code to use non-speculative sequences when CONFIG_RETPOLINE is enabled.

Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return address after the 'call' instruction must be *precisely* at the .Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work, and the use of alternatives will mess that up unless we play horrid games to prepend with NOPs and make the variants the same length. It's not worth it; in the case where we ALTERNATIVE out the retpoline, the first instruction at __x86.indirect_thunk.rax is going to be a bare jmp *%rax anyway.

Signed-off-by: David Woodhouse <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Acked-by: Arjan van de Ven <[email protected]>
Cc: [email protected]
Cc: Rik van Riel <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: [email protected]
Cc: Peter Zijlstra <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Jiri Kosina <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Tim Chen <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Paul Turner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
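For context on the "bare jmp *%rax" remark above: a retpoline thunk replaces an indirect branch with a call/ret pair whose speculative target is a harmless capture loop, so the CPU's indirect branch predictor is never consulted. The following is only a sketch of that general pattern for the %rax thunk (illustrative label names; the real thunk added by the companion retpoline patch additionally wraps its body in ALTERNATIVE, which is how it can degrade to the bare jmp *%rax mentioned above on CPUs where the mitigation is patched out):

    __x86_indirect_thunk_rax:
    	call	.Ldo_rax_jump		/* pushes the address of the capture loop as the return address */
    .Lcapture_speculation:
    	pause				/* any speculation past the call lands here ...          */
    	lfence				/* ... and spins harmlessly instead of reaching a gadget */
    	jmp	.Lcapture_speculation
    .Ldo_rax_jump:
    	movq	%rax, (%rsp)		/* overwrite the saved return address with the real target */
    	ret				/* architecturally, this "returns" to *%rax */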
1 parent: 9697fa3

2 files changed: +12, -5 lines

arch/x86/entry/entry_32.S (3 additions, 2 deletions)

@@ -44,6 +44,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 .section .entry.text, "ax"
 
@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
 
 	/* kernel thread */
 1:	movl	%edi, %eax
-	call	*%ebx
+	CALL_NOSPEC %ebx
 	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@ common_exception:
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax			# pt_regs pointer
-	call	*%edi
+	CALL_NOSPEC %edi
 	jmp	ret_from_exception
 END(common_exception)
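CALL_NOSPEC comes from the <asm/nospec-branch.h> header included above. A simplified sketch of the idea (not the actual macro, which is built with ALTERNATIVE so the retpoline can be patched out at boot on CPUs that do not need it, and assuming per-register thunk names of the form __x86_indirect_thunk_<reg>, of which the %rax variant appears in the entry_64.S hunks below):

    /* Sketch only: what CALL_NOSPEC %ebx boils down to */
    #ifdef CONFIG_RETPOLINE
    	call	__x86_indirect_thunk_ebx	/* reach *%ebx through a speculation-safe thunk */
    #else
    	call	*%ebx				/* ordinary indirect call */
    #endif

JMP_NOSPEC, used in the entry_64.S hunks below, is the tail-call analogue: it replaces jmp *%reg the same way.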

arch/x86/entry/entry_64.S (9 additions, 3 deletions)

@@ -37,6 +37,7 @@
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>
 
 #include "calling.h"
@@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
	 */
 	pushq	%rdi
 	movq	$entry_SYSCALL_64_stage2, %rdi
-	jmp	*%rdi
+	JMP_NOSPEC %rdi
 END(entry_SYSCALL_64_trampoline)
 
 .popsection
@@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath:
	 * It might end up jumping to the slow path. If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
+#ifdef CONFIG_RETPOLINE
+	movq	sys_call_table(, %rax, 8), %rax
+	call	__x86_indirect_thunk_rax
+#else
 	call	*sys_call_table(, %rax, 8)
+#endif
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64)
 	jmp	entry_SYSCALL64_slow_path
 
 1:
-	jmp	*%rax			/* Called from C */
+	JMP_NOSPEC %rax			/* Called from C */
 END(stub_ptregs_64)
 
 .macro ptregs_stub func
@@ -517,7 +523,7 @@ ENTRY(ret_from_fork)
 1:
	/* kernel thread */
 	movq	%r12, %rdi
-	call	*%rbx
+	CALL_NOSPEC %rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
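The fastpath hunk above open-codes the retpoline instead of using CALL_NOSPEC, for the reason given in the commit message: the return address pushed by the call must land exactly at .Lentry_SYSCALL_64_after_fastpath_call, and an ALTERNATIVE-patched call site could shift the call unless every variant were padded to the same length. Annotated sketch of the CONFIG_RETPOLINE fastpath (the same instructions as in the hunk, with comments added for illustration):

    	movq	sys_call_table(, %rax, 8), %rax	/* %rax = sys_call_table[syscall nr] */
    	call	__x86_indirect_thunk_rax	/* direct, fixed-length call; the thunk jumps to *%rax */
    .Lentry_SYSCALL_64_after_fastpath_call:	/* return address is guaranteed to land here */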
