@@ -7,6 +7,48 @@
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
 
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS  16 /* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version — two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)       \
+        mov     $(nr/2), reg;                   \
+771:                                            \
+        call    772f;                           \
+773:    /* speculation trap */                  \
+        pause;                                  \
+        jmp     773b;                           \
+772:                                            \
+        call    774f;                           \
+775:    /* speculation trap */                  \
+        pause;                                  \
+        jmp     775b;                           \
+774:                                            \
+        dec     reg;                            \
+        jnz     771b;                           \
+        add     $(BITS_PER_LONG/8) * nr, sp;
+
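To see what an expansion of __FILL_RETURN_BUFFER actually executes, here is a minimal stand-alone C sketch of the same two-calls-per-iteration pattern, assuming x86-64 (8-byte stack slots) and no red zone below %rsp; the kernel builds with -mno-red-zone, and a user-space test of this would need that flag too. The helper name rsb_stuff is ours, purely for illustration:

    /*
     * Stand-alone sketch (not the kernel's code) of RSB stuffing:
     * each loop iteration does two calls, so 16 iterations push 32
     * return addresses, each followed by a 'pause; jmp' trap that
     * only a speculative 'ret' can reach. Architecturally, each
     * 'call' skips its trap and continues at the target label.
     */
    static inline void rsb_stuff(void)  /* hypothetical helper name */
    {
            unsigned long loops = 32 / 2;   /* 16 iterations x 2 calls = 32 entries */

            asm volatile("1:\n\t"
                         "call 2f\n"            /* push 1st return address */
                         "3:\tpause\n\t"        /* speculation trap */
                         "jmp 3b\n"
                         "2:\tcall 4f\n"        /* push 2nd return address */
                         "5:\tpause\n\t"        /* speculation trap */
                         "jmp 5b\n"
                         "4:\tdec %0\n\t"
                         "jnz 1b\n\t"
                         "add $(8 * 32), %%rsp\n\t" /* drop all 32 pushed slots */
                         : "+r" (loops) : : "memory");
    }

The final add is why the macro takes sp as an argument: the 32 calls leave 32 stale return addresses on the stack, and one addition unwinds them all at once.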
 
 #ifdef __ASSEMBLY__
 /*
@@ -74,6 +116,20 @@
 #else
         call    *\reg
 #endif
+.endm
+
+/*
+ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+ * monstrosity above, manually.
+ */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+#ifdef CONFIG_RETPOLINE
+        ANNOTATE_NOSPEC_ALTERNATIVE
+        ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
+                __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
+                \ftr
+.Lskip_rsb_\@:
+#endif
 .endm
 
 #else /* __ASSEMBLY__ */
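For reference, the assembler-side macro above is meant to be invoked from .S files along these lines (an illustrative sketch, not a call site from this patch; the scratch register and feature bit are arbitrary here):

    /* \reg is clobbered as the loop counter; \ftr gates the ALTERNATIVE */
    FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE

The ALTERNATIVE wrapper means the fill sequence is patched in only on CPUs where the given feature bit is set; everywhere else the code is a single jmp over the skip label.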
@@ -119,7 +175,7 @@
         X86_FEATURE_RETPOLINE)
 
 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
-#else /* No retpoline */
+#else /* No retpoline for C / inline asm */
 # define CALL_NOSPEC "call *%[thunk_target]\n"
 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
 #endif
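This is roughly how CALL_NOSPEC and THUNK_TARGET are consumed from C. A hedged sketch, not code from this patch: the wrapper name call_nospec is invented, and a real call site must clobber everything the callee may clobber (the list below only covers the x86-64 SysV caller-saved GP registers):

    /*
     * Sketch of a retpoline-safe indirect call from C. CALL_NOSPEC
     * expands either to a plain indirect call or to a call through
     * the retpoline thunk, depending on X86_FEATURE_RETPOLINE.
     */
    static unsigned long call_nospec(unsigned long (*fn)(void))
    {
            unsigned long ret;

            asm volatile (CALL_NOSPEC
                          : "=a" (ret)
                          : THUNK_TARGET(fn)
                          : "rcx", "rdx", "rsi", "rdi",
                            "r8", "r9", "r10", "r11", "memory", "cc");
            return ret;
    }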
@@ -134,5 +190,25 @@ enum spectre_v2_mitigation {
         SPECTRE_V2_IBRS,
 };
 
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+#ifdef CONFIG_RETPOLINE
+        unsigned long loops = RSB_CLEAR_LOOPS / 2;
+
+        asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+                      ALTERNATIVE("jmp 910f",
+                                  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+                                  X86_FEATURE_RETPOLINE)
+                      "910:"
+                      : "=&r" (loops), ASM_CALL_CONSTRAINT
+                      : "r" (loops) : "memory" );
+#endif
+}
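Per the comment above, the intended call site is the host's guest-exit path, immediately after the assembly that returns from the guest and before any host 'ret' could consume a guest-trained RSB entry, e.g. (illustrative placement, not a hunk from this view):

    /* at the end of the vcpu run path, right after the VM-exit asm: */
    vmexit_fill_RSB();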
 #endif /* __ASSEMBLY__ */
 #endif /* __NOSPEC_BRANCH_H__ */