Skip to content

Commit 3b29a60

Browse files
committed
x86/bugs: Rename _RDS to _SSBD
Intel collateral will reference the SSB mitigation bit in
IA32_SPEC_CTL[2] as SSBD (Speculative Store Bypass Disable). Hence
changing it.

It is unclear yet what the MSR_IA32_ARCH_CAPABILITIES (0x10a) Bit(4)
name is going to be. Following the rename it would be SSBD_NO but that
rolls out to Speculative Store Bypass Disable No.

Also fixed the missing space in X86_FEATURE_AMD_SSBD.

[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]

Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
(cherry picked from commit 9f65fb2)

Orabug: 28034177
CVE: CVE-2018-3639

Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
Tested-by: Mihai Carabas <[email protected]>
Reviewed-by: Mihai Carabas <[email protected]>
Reviewed-by: John Haxby <[email protected]>

Conflicts:
	arch/x86/include/asm/cpufeatures.h
	arch/x86/kernel/cpu/bugs.c
	[As we have the IBRS code]
1 parent 3acae02 commit 3b29a60

File tree

12 files changed

+61
-61
lines changed

12 files changed

+61
-61
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4000,7 +4000,7 @@
40004000
skylake=off - do not use IBRS if present on Skylake
40014001
instead of retpoline (this is equivalant
40024002
to spectre_v2=retpoline,generic).
4003-
rds=off - do not activate the Speculative Store Bypass
4003+
ssbd=off - do not activate the Speculative Store Bypass
40044004
mitigation if doing IBRS.
40054005

40064006
spia_io_base= [HW,MTD]

arch/x86/include/asm/cpufeatures.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,7 @@
214214
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
215215
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
216216
#define X86_FEATURE_IBRS_ALL ( 7*32+23) /* IBRS all the time */
217-
#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */
217+
#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
218218
#define X86_FEATURE_VMEXIT_RSB_FULL ( 7*32+27) /* "" Whether to stuff the RSB on VMEXIT. */
219219
#define X86_FEATURE_STUFF_RSB ( 7*32+28) /* "" Whether to stuff the RSB (usually dependent on !SMEP) */
220220
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+29) /* "" Disable Speculative Store Bypass. */
@@ -337,7 +337,7 @@
337337
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
338338
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
339339
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
340-
#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
340+
#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */
341341

342342
/*
343343
* BUG word(s)

arch/x86/include/asm/msr-index.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -44,8 +44,8 @@
4444
#define SPEC_CTRL_IBRS (1 << 0)
4545
#define SPEC_CTRL_FEATURE_ENABLE_IBRS SPEC_CTRL_IBRS /* Indirect Branch Restricted Speculation */
4646
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
47-
#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
48-
#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
47+
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
48+
#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
4949

5050
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
5151
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
@@ -72,10 +72,10 @@
7272
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
7373
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
7474
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
75-
#define ARCH_CAP_RDS_NO (1 << 4) /*
75+
#define ARCH_CAP_SSBD_NO (1 << 4) /*
7676
* Not susceptible to Speculative Store Bypass
77-
* attack, so no Reduced Data Speculation control
78-
* required.
77+
* attack, so no Speculative Store Bypass
78+
* control required.
7979
*/
8080

8181
#define MSR_IA32_BBL_CR_CTL 0x00000119

arch/x86/include/asm/spec-ctrl.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u64);
1717

1818
/* AMD specific Speculative Store Bypass MSR data */
1919
extern u64 x86_amd_ls_cfg_base;
20-
extern u64 x86_amd_ls_cfg_rds_mask;
20+
extern u64 x86_amd_ls_cfg_ssbd_mask;
2121

2222
/* The Intel SPEC CTRL MSR base value cache */
2323
extern u64 x86_spec_ctrl_base;
2424

25-
static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
25+
static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
2626
{
27-
BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
28-
return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
27+
BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
28+
return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
2929
}
3030

31-
static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
31+
static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
3232
{
33-
return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
33+
return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
3434
}
3535

3636
extern void speculative_store_bypass_update(void);

arch/x86/include/asm/thread_info.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ struct thread_info {
8181
#define TIF_SIGPENDING 2 /* signal pending */
8282
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
8383
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
84-
#define TIF_RDS 5 /* Reduced data speculation */
84+
#define TIF_SSBD 5 /* Reduced data speculation */
8585
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
8686
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
8787
#define TIF_SECCOMP 8 /* secure computing */
@@ -110,7 +110,7 @@ struct thread_info {
110110
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
111111
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
112112
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
113-
#define _TIF_RDS (1 << TIF_RDS)
113+
#define _TIF_SSBD (1 << TIF_SSBD)
114114
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
115115
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
116116
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -152,7 +152,7 @@ struct thread_info {
152152

153153
/* flags to check in __switch_to() */
154154
#define _TIF_WORK_CTXSW \
155-
(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
155+
(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
156156

157157
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
158158
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)

arch/x86/kernel/cpu/amd.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -567,12 +567,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
567567
}
568568
/*
569569
* Try to cache the base value so further operations can
570-
* avoid RMW. If that faults, do not enable RDS.
570+
* avoid RMW. If that faults, do not enable SSBD.
571571
*/
572572
if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
573-
setup_force_cpu_cap(X86_FEATURE_RDS);
574-
setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
575-
x86_amd_ls_cfg_rds_mask = 1ULL << bit;
573+
setup_force_cpu_cap(X86_FEATURE_SSBD);
574+
setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
575+
x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
576576
}
577577
}
578578
}
@@ -920,9 +920,9 @@ static void init_amd(struct cpuinfo_x86 *c)
920920
if (!cpu_has(c, X86_FEATURE_XENPV))
921921
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
922922

923-
if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
924-
set_cpu_cap(c, X86_FEATURE_RDS);
925-
set_cpu_cap(c, X86_FEATURE_AMD_RDS);
923+
if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
924+
set_cpu_cap(c, X86_FEATURE_SSBD);
925+
set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
926926
}
927927
}
928928

arch/x86/kernel/cpu/bugs.c

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(spec_ctrl_mutex);
5454
bool use_ibrs_on_skylake = true;
5555
EXPORT_SYMBOL(use_ibrs_on_skylake);
5656

57-
bool use_ibrs_with_rds = true;
57+
bool use_ibrs_with_ssbd = true;
5858

5959
int __init spectre_v2_heuristics_setup(char *p)
6060
{
@@ -64,7 +64,7 @@ int __init spectre_v2_heuristics_setup(char *p)
6464
/* Disable all heuristics. */
6565
if (!strncmp(p, "off", 3)) {
6666
use_ibrs_on_skylake = false;
67-
use_ibrs_with_rds = false;
67+
use_ibrs_with_ssbd = false;
6868
break;
6969
}
7070
len = strlen("skylake");
@@ -85,7 +85,7 @@ int __init spectre_v2_heuristics_setup(char *p)
8585
if (*p == '\0')
8686
break;
8787
if (!strncmp(p, "off", 3))
88-
use_ibrs_with_rds = false;
88+
use_ibrs_with_ssbd = false;
8989
}
9090

9191
p = strpbrk(p, ",");
@@ -99,7 +99,7 @@ __setup("spectre_v2_heuristics=", spectre_v2_heuristics_setup);
9999

100100
static void __init spectre_v2_select_mitigation(void);
101101
static void __init ssb_select_mitigation(void);
102-
static bool rds_ibrs_selected(void);
102+
static bool ssbd_ibrs_selected(void);
103103

104104
/*
105105
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -123,10 +123,10 @@ static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
123123

124124
/*
125125
* AMD specific MSR info for Speculative Store Bypass control.
126-
* x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
126+
* x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
127127
*/
128128
u64 __ro_after_init x86_amd_ls_cfg_base;
129-
u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
129+
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
130130

131131
void __init check_bugs(void)
132132
{
@@ -226,11 +226,11 @@ void x86_spec_ctrl_set(u64 val)
226226
/*
227227
* Only two states are allowed - with IBRS or without.
228228
*/
229-
if (rds_ibrs_selected()) {
229+
if (ssbd_ibrs_selected()) {
230230
if (val & SPEC_CTRL_IBRS)
231231
host = x86_spec_ctrl_priv;
232232
else
233-
host = val & ~(SPEC_CTRL_RDS);
233+
host = val & ~(SPEC_CTRL_SSBD);
234234
} else {
235235
if (ibrs_inuse)
236236
host = x86_spec_ctrl_priv;
@@ -248,7 +248,7 @@ u64 x86_spec_ctrl_get_default(void)
248248
u64 msrval = x86_spec_ctrl_base;
249249

250250
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
251-
msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
251+
msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
252252
return msrval;
253253
}
254254
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
@@ -266,7 +266,7 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
266266
}
267267

268268
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
269-
host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
269+
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
270270

271271
if (host != guest_spec_ctrl)
272272
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
@@ -286,18 +286,18 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
286286
}
287287

288288
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
289-
host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
289+
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
290290

291291
if (host != guest_spec_ctrl)
292292
wrmsrl(MSR_IA32_SPEC_CTRL, host);
293293
}
294294
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
295295

296-
static void x86_amd_rds_enable(void)
296+
static void x86_amd_ssb_disable(void)
297297
{
298-
u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
298+
u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
299299

300-
if (boot_cpu_has(X86_FEATURE_AMD_RDS))
300+
if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
301301
wrmsrl(MSR_AMD64_LS_CFG, msrval);
302302
}
303303

@@ -606,7 +606,7 @@ static void __init spectre_v2_select_mitigation(void)
606606
if (!retp_compiler() /* prefer IBRS over minimal ASM */ ||
607607
(retp_compiler() && !retpoline_selected(cmd) &&
608608
((is_skylake_era() && use_ibrs_on_skylake) ||
609-
(rds_ibrs_selected() && use_ibrs_with_rds)))) {
609+
(ssbd_ibrs_selected() && use_ibrs_with_ssbd)))) {
610610
/* Start the engine! */
611611
mode = ibrs_select();
612612
if (mode == SPECTRE_V2_IBRS)
@@ -668,7 +668,7 @@ static void __init spectre_v2_select_mitigation(void)
668668

669669
static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
670670

671-
bool rds_ibrs_selected(void)
671+
bool ssbd_ibrs_selected(void)
672672
{
673673
return (ssb_mode == SPEC_STORE_BYPASS_USERSPACE);
674674
}
@@ -739,7 +739,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
739739
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
740740
enum ssb_mitigation_cmd cmd;
741741

742-
if (!boot_cpu_has(X86_FEATURE_RDS))
742+
if (!boot_cpu_has(X86_FEATURE_SSBD))
743743
return mode;
744744

745745
cmd = ssb_parse_cmdline();
@@ -782,7 +782,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
782782
/*
783783
* We have three CPU feature flags that are in play here:
784784
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
785-
* - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
785+
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
786786
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
787787
*/
788788
if (mode == SPEC_STORE_BYPASS_DISABLE)
@@ -796,17 +796,17 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
796796
*/
797797
switch (boot_cpu_data.x86_vendor) {
798798
case X86_VENDOR_INTEL:
799-
x86_spec_ctrl_base |= SPEC_CTRL_RDS;
799+
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
800800
if (mode == SPEC_STORE_BYPASS_DISABLE) {
801-
x86_spec_ctrl_mask &= ~(SPEC_CTRL_RDS);
802-
x86_spec_ctrl_set(SPEC_CTRL_RDS);
801+
x86_spec_ctrl_mask &= ~(SPEC_CTRL_SSBD);
802+
x86_spec_ctrl_set(SPEC_CTRL_SSBD);
803803
}
804804
else
805-
x86_spec_ctrl_priv &= ~(SPEC_CTRL_RDS);
805+
x86_spec_ctrl_priv &= ~(SPEC_CTRL_SSBD);
806806
break;
807807
case X86_VENDOR_AMD:
808808
if (mode == SPEC_STORE_BYPASS_DISABLE)
809-
x86_amd_rds_enable();
809+
x86_amd_ssb_disable();
810810
break;
811811
}
812812
}
@@ -839,16 +839,16 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
839839
if (task_spec_ssb_force_disable(task))
840840
return -EPERM;
841841
task_clear_spec_ssb_disable(task);
842-
update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
842+
update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
843843
break;
844844
case PR_SPEC_DISABLE:
845845
task_set_spec_ssb_disable(task);
846-
update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
846+
update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
847847
break;
848848
case PR_SPEC_FORCE_DISABLE:
849849
task_set_spec_ssb_disable(task);
850850
task_set_spec_ssb_force_disable(task);
851-
update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
851+
update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
852852
break;
853853
default:
854854
return -ERANGE;
@@ -919,7 +919,7 @@ void x86_spec_ctrl_setup_ap(void)
919919
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
920920

921921
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
922-
x86_amd_rds_enable();
922+
x86_amd_ssb_disable();
923923
}
924924

925925
#ifdef CONFIG_SYSFS

arch/x86/kernel/cpu/common.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -984,7 +984,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
984984
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
985985

986986
if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
987-
!(ia32_cap & ARCH_CAP_RDS_NO))
987+
!(ia32_cap & ARCH_CAP_SSBD_NO))
988988
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
989989

990990
if (x86_match_cpu(cpu_no_speculation))

arch/x86/kernel/cpu/intel.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
189189
setup_clear_cpu_cap(X86_FEATURE_STIBP);
190190
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
191191
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
192-
setup_clear_cpu_cap(X86_FEATURE_RDS);
192+
setup_clear_cpu_cap(X86_FEATURE_SSBD);
193193
}
194194

195195
/*

arch/x86/kernel/process.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -284,11 +284,11 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
284284
{
285285
u64 msr;
286286

287-
if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
288-
msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
287+
if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
288+
msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
289289
wrmsrl(MSR_AMD64_LS_CFG, msr);
290290
} else {
291-
msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
291+
msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
292292
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
293293
}
294294
}
@@ -330,7 +330,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
330330
if ((tifp ^ tifn) & _TIF_NOCPUID)
331331
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
332332

333-
if ((tifp ^ tifn) & _TIF_RDS)
333+
if ((tifp ^ tifn) & _TIF_SSBD)
334334
__speculative_store_bypass_update(tifn);
335335
}
336336

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -402,7 +402,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
402402

403403
/* cpuid 7.0.edx*/
404404
const u32 kvm_cpuid_7_0_edx_x86_features =
405-
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(RDS) |
405+
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
406406
F(ARCH_CAPABILITIES);
407407

408408
/* all calls to cpuid_count() should be made on the same cpu */

0 commit comments

Comments (0)