Skip to content

Commit 117cc7a

Browse files
dwmw2 authored and KAGA-KOKO committed
x86/retpoline: Fill return stack buffer on vmexit
In accordance with the Intel and AMD documentation, we need to overwrite all entries in the RSB on exiting a guest, to prevent malicious branch target predictions from affecting the host kernel. This is needed both for retpoline and for IBRS. [ak: numbers again for the RSB stuffing labels] Signed-off-by: David Woodhouse <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Tested-by: Peter Zijlstra (Intel) <[email protected]> Cc: [email protected] Cc: Rik van Riel <[email protected]> Cc: Andi Kleen <[email protected]> Cc: Josh Poimboeuf <[email protected]> Cc: [email protected] Cc: Linus Torvalds <[email protected]> Cc: Jiri Kosina <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Kees Cook <[email protected]> Cc: Tim Chen <[email protected]> Cc: Greg Kroah-Hartman <[email protected]> Cc: Paul Turner <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 7614e91 commit 117cc7a

File tree

3 files changed

+85
-1
lines changed

3 files changed

+85
-1
lines changed

arch/x86/include/asm/nospec-branch.h

Lines changed: 77 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,48 @@
77
#include <asm/alternative-asm.h>
88
#include <asm/cpufeatures.h>
99

10+
/*
11+
* Fill the CPU return stack buffer.
12+
*
13+
* Each entry in the RSB, if used for a speculative 'ret', contains an
14+
* infinite 'pause; jmp' loop to capture speculative execution.
15+
*
16+
* This is required in various cases for retpoline and IBRS-based
17+
* mitigations for the Spectre variant 2 vulnerability. Sometimes to
18+
* eliminate potentially bogus entries from the RSB, and sometimes
19+
* purely to ensure that it doesn't get empty, which on some CPUs would
20+
* allow predictions from other (unwanted!) sources to be used.
21+
*
22+
* We define a CPP macro such that it can be used from both .S files and
23+
* inline assembly. It's possible to do a .macro and then include that
24+
* from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
25+
*/
26+
27+
#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
28+
#define RSB_FILL_LOOPS 16 /* To avoid underflow */
29+
30+
/*
31+
* Google experimented with loop-unrolling and this turned out to be
32+
* the optimal version — two calls, each with their own speculation
33+
* trap should their return address end up getting used, in a loop.
34+
*/
35+
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
36+
mov $(nr/2), reg; \
37+
771: \
38+
call 772f; \
39+
773: /* speculation trap */ \
40+
pause; \
41+
jmp 773b; \
42+
772: \
43+
call 774f; \
44+
775: /* speculation trap */ \
45+
pause; \
46+
jmp 775b; \
47+
774: \
48+
dec reg; \
49+
jnz 771b; \
50+
add $(BITS_PER_LONG/8) * nr, sp;
51+
1052
#ifdef __ASSEMBLY__
1153

1254
/*
@@ -74,6 +116,20 @@
74116
#else
75117
call *\reg
76118
#endif
119+
.endm
120+
121+
/*
122+
* A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
123+
* monstrosity above, manually.
124+
*/
125+
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
126+
#ifdef CONFIG_RETPOLINE
127+
ANNOTATE_NOSPEC_ALTERNATIVE
128+
ALTERNATIVE "jmp .Lskip_rsb_\@", \
129+
__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
130+
\ftr
131+
.Lskip_rsb_\@:
132+
#endif
77133
.endm
78134

79135
#else /* __ASSEMBLY__ */
@@ -119,7 +175,7 @@
119175
X86_FEATURE_RETPOLINE)
120176

121177
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
122-
#else /* No retpoline */
178+
#else /* No retpoline for C / inline asm */
123179
# define CALL_NOSPEC "call *%[thunk_target]\n"
124180
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
125181
#endif
@@ -134,5 +190,25 @@ enum spectre_v2_mitigation {
134190
SPECTRE_V2_IBRS,
135191
};
136192

193+
/*
194+
* On VMEXIT we must ensure that no RSB predictions learned in the guest
195+
* can be followed in the host, by overwriting the RSB completely. Both
196+
* retpoline and IBRS mitigations for Spectre v2 need this; only on future
197+
* CPUs with IBRS_ATT *might* it be avoided.
198+
*/
199+
static inline void vmexit_fill_RSB(void)
200+
{
201+
#ifdef CONFIG_RETPOLINE
202+
unsigned long loops = RSB_CLEAR_LOOPS / 2;
203+
204+
asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
205+
ALTERNATIVE("jmp 910f",
206+
__stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
207+
X86_FEATURE_RETPOLINE)
208+
"910:"
209+
: "=&r" (loops), ASM_CALL_CONSTRAINT
210+
: "r" (loops) : "memory" );
211+
#endif
212+
}
137213
#endif /* __ASSEMBLY__ */
138214
#endif /* __NOSPEC_BRANCH_H__ */

arch/x86/kvm/svm.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@
4545
#include <asm/debugreg.h>
4646
#include <asm/kvm_para.h>
4747
#include <asm/irq_remapping.h>
48+
#include <asm/nospec-branch.h>
4849

4950
#include <asm/virtext.h>
5051
#include "trace.h"
@@ -4985,6 +4986,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
49854986
#endif
49864987
);
49874988

4989+
/* Eliminate branch target predictions from guest mode */
4990+
vmexit_fill_RSB();
4991+
49884992
#ifdef CONFIG_X86_64
49894993
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
49904994
#else

arch/x86/kvm/vmx.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@
5050
#include <asm/apic.h>
5151
#include <asm/irq_remapping.h>
5252
#include <asm/mmu_context.h>
53+
#include <asm/nospec-branch.h>
5354

5455
#include "trace.h"
5556
#include "pmu.h"
@@ -9403,6 +9404,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
94039404
#endif
94049405
);
94059406

9407+
/* Eliminate branch target predictions from guest mode */
9408+
vmexit_fill_RSB();
9409+
94069410
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
94079411
if (debugctlmsr)
94089412
update_debugctlmsr(debugctlmsr);

0 commit comments

Comments
 (0)