
Commit 3c76db7

Author: Ingo Molnar (committed)

Merge branch 'x86/pti' into x86/mm, to pick up dependencies

Signed-off-by: Ingo Molnar <[email protected]>

2 parents: 194a974 + 7958b22


69 files changed (+910, -471 lines)

Makefile

Lines changed: 5 additions & 0 deletions
@@ -489,6 +489,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 endif
 
+RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+export RETPOLINE_CFLAGS
+
 ifeq ($(config-targets),1)
 # ===========================================================================
 # *config targets only - make sure prerequisites are updated, and descend
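For readers unfamiliar with the retpoline options: -mindirect-branch=thunk-extern (GCC) and -mretpoline-external-thunk (Clang) make the compiler route every indirect branch through an external thunk symbol that the kernel provides, and cc-option picks whichever flag set the compiler accepts. A minimal, hypothetical user-space sketch of the effect (not part of this commit; the function names are made up):

/* demo.c -- hypothetical example. Compile with
 *   gcc -O2 -mindirect-branch=thunk-extern -mindirect-branch-register -S demo.c
 * and the indirect call below is emitted as
 *   call __x86_indirect_thunk_rax   (thunk named after the register holding the target)
 * instead of a plain "call *%rax"; the kernel supplies those thunk symbols
 * when CONFIG_RETPOLINE is enabled. */
void (*handler)(void);

void dispatch(void)
{
        handler();      /* indirect call turned into a retpoline thunk call */
}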

arch/x86/Kconfig

Lines changed: 2 additions & 10 deletions
@@ -430,6 +430,7 @@ config GOLDFISH
 config RETPOLINE
 bool "Avoid speculative indirect branches in kernel"
 default y
+select STACK_VALIDATION if HAVE_STACK_VALIDATION
 help
 Compile kernel with the retpoline compiler options to guard against
 kernel-to-user data leaks by avoiding speculative indirect
@@ -2315,23 +2316,14 @@ choice
 it can be used to assist security vulnerability exploitation.
 
 This setting can be changed at boot time via the kernel command
-line parameter vsyscall=[native|emulate|none].
+line parameter vsyscall=[emulate|none].
 
 On a system with recent enough glibc (2.14 or newer) and no
 static binaries, you can say None without a performance penalty
 to improve security.
 
 If unsure, select "Emulate".
 
-config LEGACY_VSYSCALL_NATIVE
-bool "Native"
-help
-Actual executable code is located in the fixed vsyscall
-address mapping, implementing time() efficiently. Since
-this makes the mapping executable, it can be used during
-security vulnerability exploitation (traditionally as
-ROP gadgets). This configuration is not recommended.
-
 config LEGACY_VSYSCALL_EMULATE
 bool "Emulate"
 help

arch/x86/Makefile

Lines changed: 3 additions & 4 deletions
@@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
-ifneq ($(RETPOLINE_CFLAGS),)
-KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
-endif
+ifneq ($(RETPOLINE_CFLAGS),)
+KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+endif
 endif
 
 archscripts: scripts_basic

arch/x86/entry/calling.h

Lines changed: 19 additions & 15 deletions
@@ -97,40 +97,49 @@ For 32-bit we have the following conventions - kernel is built with
 
 #define SIZEOF_PTREGS 21*8
 
-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
 /*
  * Push registers and sanitize registers of values that a
  * speculation attack might otherwise want to exploit. The
  * lower registers are likely clobbered well before they
  * could be put to use in a speculative execution gadget.
  * Interleave XOR with PUSH for better uop scheduling:
  */
+.if \save_ret
+pushq %rsi /* pt_regs->si */
+movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
+movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */
+.else
 pushq %rdi /* pt_regs->di */
 pushq %rsi /* pt_regs->si */
+.endif
 pushq \rdx /* pt_regs->dx */
 pushq %rcx /* pt_regs->cx */
 pushq \rax /* pt_regs->ax */
 pushq %r8 /* pt_regs->r8 */
-xorq %r8, %r8 /* nospec r8 */
+xorl %r8d, %r8d /* nospec r8 */
 pushq %r9 /* pt_regs->r9 */
-xorq %r9, %r9 /* nospec r9 */
+xorl %r9d, %r9d /* nospec r9 */
 pushq %r10 /* pt_regs->r10 */
-xorq %r10, %r10 /* nospec r10 */
+xorl %r10d, %r10d /* nospec r10 */
 pushq %r11 /* pt_regs->r11 */
-xorq %r11, %r11 /* nospec r11*/
+xorl %r11d, %r11d /* nospec r11*/
 pushq %rbx /* pt_regs->rbx */
 xorl %ebx, %ebx /* nospec rbx*/
 pushq %rbp /* pt_regs->rbp */
 xorl %ebp, %ebp /* nospec rbp*/
 pushq %r12 /* pt_regs->r12 */
-xorq %r12, %r12 /* nospec r12*/
+xorl %r12d, %r12d /* nospec r12*/
 pushq %r13 /* pt_regs->r13 */
-xorq %r13, %r13 /* nospec r13*/
+xorl %r13d, %r13d /* nospec r13*/
 pushq %r14 /* pt_regs->r14 */
-xorq %r14, %r14 /* nospec r14*/
+xorl %r14d, %r14d /* nospec r14*/
 pushq %r15 /* pt_regs->r15 */
-xorq %r15, %r15 /* nospec r15*/
+xorl %r15d, %r15d /* nospec r15*/
 UNWIND_HINT_REGS
+.if \save_ret
+pushq %rsi /* return address on top of stack */
+.endif
 .endm
 
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
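A note on the xorq -> xorl change above: on x86-64, writing a 32-bit register zero-extends into the full 64-bit register, so the shorter xorl encoding clears the whole register exactly as xorq did. A small user-space sketch (not from the kernel; gcc/clang inline asm, x86-64 only) showing the zero-extension:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t r = 0xdeadbeefcafebabeULL;

        /* xor of the 32-bit alias (%k0) zero-extends, clearing all 64 bits
         * of the register, with a shorter encoding than the 64-bit xorq */
        asm("xorl %k0, %k0" : "+r" (r));

        printf("r = %#llx\n", (unsigned long long)r);   /* prints r = 0 */
        return 0;
}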
@@ -172,12 +181,7 @@ For 32-bit we have the following conventions - kernel is built with
  */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
 #ifdef CONFIG_FRAME_POINTER
-.if \ptregs_offset
-leaq \ptregs_offset(%rsp), %rbp
-.else
-mov %rsp, %rbp
-.endif
-orq $0x1, %rbp
+leaq 1+\ptregs_offset(%rsp), %rbp
 #endif
 .endm
 
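The simplified ENCODE_FRAME_POINTER works because the pt_regs address derived from %rsp always has bit 0 clear, so adding 1 in the leaq displacement is equivalent to the old OR with 1. A quick user-space sketch of that equivalence (the address constant is made up for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* stand-in for the pt_regs address computed from %rsp; kernel
         * stacks are at least 8-byte aligned, so bit 0 is always clear */
        uint64_t regs = 0xffffc90000013f58ULL;

        assert((regs & 1) == 0);
        /* with bit 0 clear, OR-ing in 1 equals adding 1, which is why
         * "leaq 1+\ptregs_offset(%rsp), %rbp" can replace the old
         * lea/mov pair followed by "orq $0x1, %rbp" */
        assert((regs | 1) == regs + 1);

        printf("encoded frame pointer: %#llx\n", (unsigned long long)(regs | 1));
        return 0;
}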
arch/x86/entry/entry_32.S

Lines changed: 1 addition & 2 deletions
@@ -252,8 +252,7 @@ ENTRY(__switch_to_asm)
  * exist, overwrite the RSB with entries which capture
  * speculative execution to prevent attack.
  */
-/* Clobbers %ebx */
-FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 #endif
 
 /* restore callee-saved registers */
