Skip to content

Commit 3ebc170

Browse files
Peter Zijlstra authored and suryasaimadhu committed
x86/bugs: Add retbleed=ibpb
jmp2ret mitigates the easy-to-attack case at relatively low overhead. It mitigates the long speculation windows after a mispredicted RET, but it does not mitigate the short speculation window from arbitrary instruction boundaries. On Zen2, there is a chicken bit which needs setting, which mitigates "arbitrary instruction boundaries" down to just "basic block boundaries". But there is no fix for the short speculation window on basic block boundaries, other than to flush the entire BTB to evict all attacker predictions. On the spectrum of "fast & blurry" -> "safe", there is (on top of STIBP or no-SMT): 1) Nothing System wide open 2) jmp2ret May stop a script kiddy 3) jmp2ret+chickenbit Raises the bar rather further 4) IBPB Only thing which can count as "safe". Tentative numbers put IBPB-on-entry at a 2.5x hit on Zen2, and a 10x hit on Zen1 according to lmbench. [ bp: Fixup feature bit comments, document option, 32-bit build fix. ] Suggested-by: Andrew Cooper <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Borislav Petkov <[email protected]> Reviewed-by: Josh Poimboeuf <[email protected]> Signed-off-by: Borislav Petkov <[email protected]>
1 parent d147553 commit 3ebc170

File tree

6 files changed

+67
-13
lines changed

6 files changed

+67
-13
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5207,6 +5207,9 @@
52075207
disabling SMT if necessary for
52085208
the full mitigation (only on Zen1
52095209
and older without STIBP).
5210+
ibpb - mitigate short speculation windows on
5211+
basic block boundaries too. Safe, highest
5212+
perf impact.
52105213
unret - force enable untrained return thunks,
52115214
only effective on AMD f15h-f17h
52125215
based systems.

arch/x86/entry/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
1111

1212
CFLAGS_common.o += -fno-stack-protector
1313

14-
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
14+
obj-y := entry.o entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
1515
obj-y += common.o
1616

1717
obj-y += vdso/

arch/x86/entry/entry.S

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Common place for both 32- and 64-bit entry routines.
4+
*/
5+
6+
#include <linux/linkage.h>
7+
#include <asm/export.h>
8+
#include <asm/msr-index.h>
9+
10+
.pushsection .noinstr.text, "ax"
11+
12+
/*
 * entry_ibpb - issue an Indirect Branch Prediction Barrier (IBPB) on
 * kernel entry to flush attacker-controlled branch predictions.
 *
 * Performs a WRMSR to MSR_IA32_PRED_CMD with PRED_CMD_IBPB set.
 * Clobbers AX, CX, DX (see the UNTRAIN_RET comment in nospec-branch.h);
 * callers must not rely on those registers surviving this call.
 */
SYM_FUNC_START(entry_ibpb)
13+
movl $MSR_IA32_PRED_CMD, %ecx	/* MSR index for the prediction command */
14+
movl $PRED_CMD_IBPB, %eax	/* low 32 bits: the IBPB command bit */
15+
xorl %edx, %edx			/* high 32 bits of the MSR value: zero */
16+
wrmsr
17+
RET
18+
SYM_FUNC_END(entry_ibpb)
19+
/* For KVM */
20+
EXPORT_SYMBOL_GPL(entry_ibpb);
21+
22+
.popsection

arch/x86/include/asm/cpufeatures.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@
296296
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
297297
#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
298298
#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
299-
/* FREE! (11*32+10) */
299+
#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
300300
/* FREE! (11*32+11) */
301301
#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
302302
#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */

arch/x86/include/asm/nospec-branch.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -123,14 +123,17 @@
123123
* return thunk isn't mapped into the userspace tables (then again, AMD
124124
* typically has NO_MELTDOWN).
125125
*
126-
* Doesn't clobber any registers but does require a stable stack.
126+
* While zen_untrain_ret() doesn't clobber anything but requires stack,
127+
* entry_ibpb() will clobber AX, CX, DX.
127128
*
128129
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
129130
* where we have a stack but before any RET instruction.
130131
*/
131132
.macro UNTRAIN_RET
132133
#ifdef CONFIG_RETPOLINE
133-
ALTERNATIVE "", "call zen_untrain_ret", X86_FEATURE_UNRET
134+
ALTERNATIVE_2 "", \
135+
"call zen_untrain_ret", X86_FEATURE_UNRET, \
136+
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB
134137
#endif
135138
.endm
136139

@@ -147,6 +150,7 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
147150

148151
extern void __x86_return_thunk(void);
149152
extern void zen_untrain_ret(void);
153+
extern void entry_ibpb(void);
150154

151155
#ifdef CONFIG_RETPOLINE
152156

arch/x86/kernel/cpu/bugs.c

Lines changed: 34 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -805,6 +805,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
805805
enum retbleed_mitigation {
806806
RETBLEED_MITIGATION_NONE,
807807
RETBLEED_MITIGATION_UNRET,
808+
RETBLEED_MITIGATION_IBPB,
808809
RETBLEED_MITIGATION_IBRS,
809810
RETBLEED_MITIGATION_EIBRS,
810811
};
@@ -813,11 +814,13 @@ enum retbleed_mitigation_cmd {
813814
RETBLEED_CMD_OFF,
814815
RETBLEED_CMD_AUTO,
815816
RETBLEED_CMD_UNRET,
817+
RETBLEED_CMD_IBPB,
816818
};
817819

818820
const char * const retbleed_strings[] = {
819821
[RETBLEED_MITIGATION_NONE] = "Vulnerable",
820822
[RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
823+
[RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
821824
[RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
822825
[RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
823826
};
@@ -847,6 +850,8 @@ static int __init retbleed_parse_cmdline(char *str)
847850
retbleed_cmd = RETBLEED_CMD_AUTO;
848851
} else if (!strcmp(str, "unret")) {
849852
retbleed_cmd = RETBLEED_CMD_UNRET;
853+
} else if (!strcmp(str, "ibpb")) {
854+
retbleed_cmd = RETBLEED_CMD_IBPB;
850855
} else if (!strcmp(str, "nosmt")) {
851856
retbleed_nosmt = true;
852857
} else {
@@ -861,11 +866,13 @@ static int __init retbleed_parse_cmdline(char *str)
861866
early_param("retbleed", retbleed_parse_cmdline);
862867

863868
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
864-
#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
869+
#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n"
865870
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
866871

867872
static void __init retbleed_select_mitigation(void)
868873
{
874+
bool mitigate_smt = false;
875+
869876
if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
870877
return;
871878

@@ -877,11 +884,21 @@ static void __init retbleed_select_mitigation(void)
877884
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
878885
break;
879886

887+
case RETBLEED_CMD_IBPB:
888+
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
889+
break;
890+
880891
case RETBLEED_CMD_AUTO:
881892
default:
882893
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
883-
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
884-
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
894+
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
895+
896+
if (IS_ENABLED(CONFIG_RETPOLINE) &&
897+
IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK))
898+
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
899+
else
900+
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
901+
}
885902

886903
/*
887904
* The Intel mitigation (IBRS) was already selected in
@@ -897,26 +914,34 @@ static void __init retbleed_select_mitigation(void)
897914
if (!IS_ENABLED(CONFIG_RETPOLINE) ||
898915
!IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
899916
pr_err(RETBLEED_COMPILER_MSG);
900-
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
901-
break;
917+
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
918+
goto retbleed_force_ibpb;
902919
}
903920

904921
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
905922
setup_force_cpu_cap(X86_FEATURE_UNRET);
906923

907-
if (!boot_cpu_has(X86_FEATURE_STIBP) &&
908-
(retbleed_nosmt || cpu_mitigations_auto_nosmt()))
909-
cpu_smt_disable(false);
910-
911924
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
912925
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
913926
pr_err(RETBLEED_UNTRAIN_MSG);
927+
928+
mitigate_smt = true;
929+
break;
930+
931+
case RETBLEED_MITIGATION_IBPB:
932+
retbleed_force_ibpb:
933+
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
934+
mitigate_smt = true;
914935
break;
915936

916937
default:
917938
break;
918939
}
919940

941+
if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
942+
(retbleed_nosmt || cpu_mitigations_auto_nosmt()))
943+
cpu_smt_disable(false);
944+
920945
/*
921946
* Let IBRS trump all on Intel without affecting the effects of the
922947
* retbleed= cmdline option.

0 commit comments

Comments (0)