
Commit a7514ec

KAGA-KOKO authored and konradwilk committed
x86/process: Allow runtime control of Speculative Store Bypass
The Speculative Store Bypass vulnerability can be mitigated with the Reduced Data Speculation (RDS) feature. To allow finer grained control of this eventually expensive mitigation, a per-task mitigation control is required.

Add a new TIF_RDS flag and put it into the group of TIF flags which are evaluated for mismatch in switch_to(). If these bits differ in the previous and the next task, then the slow path function __switch_to_xtra() is invoked. Implement the TIF_RDS dependent mitigation control in the slow path.

If the prctl for controlling Speculative Store Bypass is disabled or no task uses the prctl, then there is no overhead in the switch_to() fast path.

Update the KVM related speculation control functions to take TIF_RDS into account as well.

Based on a patch from Tim Chen. Completely rewritten.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Ingo Molnar <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
(cherry picked from commit 885f82b)
Orabug: 28034177
CVE: CVE-2018-3639
Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
Tested-by: Mihai Carabas <[email protected]>
Reviewed-by: Mihai Carabas <[email protected]>
Reviewed-by: John Haxby <[email protected]>

Conflicts:
	arch/x86/kernel/cpu/bugs.c
	[As we have the IBRS support we must preserve the ibrs_inuse]

---
v2: Added missing 'return'
1 parent 640ab21 commit a7514ec
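The mechanism described in the message has two halves, both visible in the diffs below: _TIF_WORK_CTXSW gains the new _TIF_RDS bit (thread_info.h), and __switch_to_xtra() reprograms the MSR when the bit differs between the outgoing and incoming task (process.c). For orientation, the guard that routes a context switch into the slow path lives in __switch_to() and is not touched by this commit; in kernels of this vintage it looks roughly like the following sketch (exact form varies by kernel version):

	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

Only when one of the _TIF_WORK_CTXSW bits is set on either task is __switch_to_xtra() entered; the per-flag mismatch test (tifp ^ tifn) then happens inside it, as the process.c hunk below shows.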

File tree

5 files changed: +73 -7 lines changed


arch/x86/include/asm/msr-index.h

Lines changed: 2 additions & 1 deletion
@@ -44,7 +44,8 @@
 #define SPEC_CTRL_IBRS (1 << 0)
 #define SPEC_CTRL_FEATURE_ENABLE_IBRS SPEC_CTRL_IBRS /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS (1 << 2) /* Reduced Data Speculation */
+#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
+#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */

 #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */

arch/x86/include/asm/spec-ctrl.h

Lines changed: 17 additions & 0 deletions
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_SPECCTRL_H_
 #define _ASM_X86_SPECCTRL_H_

+#include <linux/thread_info.h>
 #include <asm/nospec-branch.h>

 /*
@@ -18,4 +19,20 @@ extern void x86_spec_ctrl_restore_host(u64);
 extern u64 x86_amd_ls_cfg_base;
 extern u64 x86_amd_ls_cfg_rds_mask;

+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
+static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
+{
+	BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
+	return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+}
+
+static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
+{
+	return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
+}
+
+extern void speculative_store_bypass_update(void);
+
 #endif
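For reference, rds_tif_to_spec_ctrl() is just a bit relocation: TIF_RDS is bit 5 (defined in the thread_info.h hunk below) and SPEC_CTRL_RDS occupies bit 2 of the MSR (SPEC_CTRL_RDS_SHIFT in the msr-index.h hunk above), so a set flag is shifted down by 3. A standalone sketch spelling out the arithmetic (not part of the commit; plain user-space C with the constants inlined):

/* Illustrates the TIF -> SPEC_CTRL bit translation done by
 * rds_tif_to_spec_ctrl(), with TIF_RDS = 5 and SPEC_CTRL_RDS_SHIFT = 2. */
#include <assert.h>

int main(void)
{
	const unsigned long TIF_RDS = 5, SPEC_CTRL_RDS_SHIFT = 2;
	const unsigned long _TIF_RDS = 1UL << TIF_RDS;			/* 0x20 */
	const unsigned long SPEC_CTRL_RDS = 1UL << SPEC_CTRL_RDS_SHIFT;	/* 0x04 */
	unsigned long tifn = _TIF_RDS;	/* task has the flag set */

	/* Same expression as the kernel helper: mask, then shift the bit
	 * down by the distance between the two positions (5 - 2 = 3). */
	assert(((tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT)) == SPEC_CTRL_RDS);

	/* With the flag clear the helper contributes nothing to the MSR. */
	tifn = 0;
	assert(((tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT)) == 0);
	return 0;
}

The BUILD_BUG_ON() in the helper guards exactly this assumption: the TIF bit must sit at or above the MSR bit so that the right shift is non-negative.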

arch/x86/include/asm/thread_info.h

Lines changed: 3 additions & 1 deletion
@@ -81,6 +81,7 @@ struct thread_info {
 #define TIF_SIGPENDING 2 /* signal pending */
 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
 #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+#define TIF_RDS 5 /* Reduced data speculation */
 #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
 #define TIF_SECCOMP 8 /* secure computing */
@@ -109,6 +110,7 @@ struct thread_info {
 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_RDS (1 << TIF_RDS)
 #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -150,7 +152,7 @@ struct thread_info {

 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW \
-	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
+	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)

 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)

arch/x86/kernel/cpu/bugs.c

Lines changed: 29 additions & 5 deletions
@@ -226,27 +226,51 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);

 u64 x86_spec_ctrl_get_default(void)
 {
-	return x86_spec_ctrl_base;
+	u64 msrval = x86_spec_ctrl_base;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+	return msrval;
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

 void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 {
+	u64 host = x86_spec_ctrl_base;
+
 	if (!ibrs_supported)
 		return;
-	if (ibrs_inuse || x86_spec_ctrl_base != guest_spec_ctrl)
+
+	if (ibrs_inuse) {
+		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+		return;
+	}
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+
+	if (host != guest_spec_ctrl)
 		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

 void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 {
+	u64 host = x86_spec_ctrl_base;
+
 	if (!ibrs_supported)
 		return;
-	if (ibrs_inuse)
+
+	if (ibrs_inuse) {
 		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_priv);
-	else if (x86_spec_ctrl_base != guest_spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+		return;
+	}
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+
+	if (host != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, host);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
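The two guest/host helpers above exist so KVM can switch MSR_IA32_SPEC_CTRL to the guest's value around VM entry and put the host value, now including the per-task RDS bit, back afterwards. The callers live in KVM's VMX/SVM code and are not part of this diff; a rough sketch of the intended bracketing, where guest_spec_ctrl stands in for however the caller tracks the guest's value:

	/* Sketch only (not part of this diff); guest_spec_ctrl is a
	 * placeholder for the caller's tracked guest MSR value. */
	x86_spec_ctrl_set_guest(guest_spec_ctrl);	/* before VMLAUNCH/VMRESUME */
	/* ... guest runs ... */
	x86_spec_ctrl_restore_host(guest_spec_ctrl);	/* after VM exit */

Note that when ibrs_inuse is set the helpers write the MSR unconditionally, preserving the existing IBRS behaviour called out in the conflict note above.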

arch/x86/kernel/process.c

Lines changed: 22 additions & 0 deletions
@@ -39,6 +39,7 @@
 #include <asm/switch_to.h>
 #include <asm/desc.h>
 #include <asm/prctl.h>
+#include <asm/spec-ctrl.h>

 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -279,6 +280,24 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
 	}
 }

+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+	u64 msr;
+
+	if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
+		msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+		wrmsrl(MSR_AMD64_LS_CFG, msr);
+	} else {
+		msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+	}
+}
+
+void speculative_store_bypass_update(void)
+{
+	__speculative_store_bypass_update(current_thread_info()->flags);
+}
+
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		      struct tss_struct *tss)
 {
@@ -310,6 +329,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,

 	if ((tifp ^ tifn) & _TIF_NOCPUID)
 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+	if ((tifp ^ tifn) & _TIF_RDS)
+		__speculative_store_bypass_update(tifn);
 }

 /*
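speculative_store_bypass_update() is exported for use outside the context-switch path; the per-task prctl that actually flips TIF_RDS is added by a later patch in this series and is not part of this commit. A minimal, hypothetical sketch of such a caller (ssb_prctl_set() and its signature are illustrative only; set_tsk_thread_flag()/clear_tsk_thread_flag() are the existing kernel helpers):

/* Hypothetical sketch (not in this commit): how a later prctl handler
 * could arm the mitigation for one task using the pieces added here. */
static int ssb_prctl_set(struct task_struct *task, bool enable)
{
	if (enable)
		set_tsk_thread_flag(task, TIF_RDS);
	else
		clear_tsk_thread_flag(task, TIF_RDS);

	/* For the current task the MSR must be updated immediately; for
	 * other tasks __switch_to_xtra() picks the bit up on the next
	 * context switch. */
	if (task == current)
		speculative_store_bypass_update();
	return 0;
}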

0 commit comments
