
Commit 07853ad

jpoimboe authored and suryasaimadhu committed
KVM: VMX: Prevent RSB underflow before vmenter
On VMX, there are some balanced returns between the time the guest's
SPEC_CTRL value is written, and the vmenter.

Balanced returns (matched by a preceding call) are usually ok, but it's
at least theoretically possible an NMI with a deep call stack could
empty the RSB before one of the returns.

For maximum paranoia, don't allow *any* returns (balanced or otherwise)
between the SPEC_CTRL write and the vmenter.

[ bp: Fix 32-bit build. ]

Signed-off-by: Josh Poimboeuf <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
1 parent 9756bba commit 07853ad
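
For context, a hedged C sketch of the window the commit message describes; the helper names below are hypothetical stand-ins, not the kernel's real entry points. Before this commit, the guest SPEC_CTRL value was written from C, so at least one compiler-generated RET executed between the MSR write and vmentry; an NMI whose handler makes deep nested calls can overwrite the CPU's finite return stack buffer, so a later RET in the interrupted code may underflow and fall back to other, attacker-trainable predictors.

#include <stdio.h>

static void write_guest_spec_ctrl(void)   /* hypothetical stand-in */
{
        puts("wrmsr IA32_SPEC_CTRL");
}       /* the compiler ends this helper with a RET */

static void vmenter(void)                 /* hypothetical stand-in */
{
        puts("vmlaunch/vmresume");
}

int main(void)
{
        write_guest_spec_ctrl();
        /*
         * The window: an NMI with a deep call stack here can clobber the
         * RSB, so any remaining RET before vmenter() may be mispredicted.
         * Moving the MSR write into vmenter.S removes every RET from this
         * window.
         */
        vmenter();
        return 0;
}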

File tree

7 files changed: +42 -15 lines changed


arch/x86/kernel/asm-offsets.c

Lines changed: 6 additions & 0 deletions
@@ -19,6 +19,7 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 #include <asm/tdx.h>
+#include "../kvm/vmx/vmx.h"
 
 #ifdef CONFIG_XEN
 #include <xen/interface/xen.h>
@@ -107,4 +108,9 @@ static void __used common(void)
 	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
 	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
+
+	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
+		BLANK();
+		OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
+	}
 }
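
For context, asm-offsets.c exists so that assembly files can refer to C struct fields by a generated constant: OFFSET() records an offsetof() value that the build turns into a #define in asm-offsets.h. A minimal sketch of the idea, using a hypothetical stand-in struct rather than the real vcpu_vmx:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in; the real struct vcpu_vmx layout differs. */
struct vcpu_vmx_sketch {
        unsigned long padding[6];
        unsigned long spec_ctrl;   /* the field vmenter.S needs to reach */
};

int main(void)
{
        /*
         * The kernel's OFFSET() macro emits this value through special asm
         * markers that the build scrapes into asm-offsets.h; printing it
         * here just shows what the generated VMX_spec_ctrl constant means.
         */
        printf("#define VMX_spec_ctrl %zu\n",
               offsetof(struct vcpu_vmx_sketch, spec_ctrl));
        return 0;
}

This is what lets the "movl VMX_spec_ctrl(%_ASM_DI), %edi" load in vmenter.S (below) read vmx->spec_ctrl without the assembler knowing the struct layout.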

arch/x86/kernel/cpu/bugs.c

Lines changed: 2 additions & 2 deletions
@@ -196,8 +196,8 @@ void __init check_bugs(void)
 }
 
 /*
- * NOTE: For VMX, this function is not called in the vmexit path.
- * It uses vmx_spec_ctrl_restore_host() instead.
+ * NOTE: This function is *only* called for SVM.  VMX spec_ctrl handling is
+ * done in vmenter.S.
  */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)

arch/x86/kvm/vmx/capabilities.h

Lines changed: 2 additions & 2 deletions
@@ -4,8 +4,8 @@
 
 #include <asm/vmx.h>
 
-#include "lapic.h"
-#include "x86.h"
+#include "../lapic.h"
+#include "../x86.h"
 
 extern bool __read_mostly enable_vpid;
 extern bool __read_mostly flexpriority_enabled;

arch/x86/kvm/vmx/vmenter.S

Lines changed: 29 additions & 0 deletions
@@ -1,9 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/linkage.h>
 #include <asm/asm.h>
+#include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
+#include <asm/percpu.h>
 #include <asm/segment.h>
 #include "run_flags.h"
 
@@ -73,6 +75,33 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	lea (%_ASM_SP), %_ASM_ARG2
 	call vmx_update_host_rsp
 
+	ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL
+
+	/*
+	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
+	 * host's, write the MSR.
+	 *
+	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
+	 * there must not be any returns or indirect branches between this code
+	 * and vmentry.
+	 */
+	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
+	movl VMX_spec_ctrl(%_ASM_DI), %edi
+	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
+	cmp %edi, %esi
+	je .Lspec_ctrl_done
+	mov $MSR_IA32_SPEC_CTRL, %ecx
+	xor %edx, %edx
+	mov %edi, %eax
+	wrmsr
+
+.Lspec_ctrl_done:
+
+	/*
+	 * Since vmentry is serializing on affected CPUs, there's no need for
+	 * an LFENCE to stop speculation from skipping the wrmsr.
+	 */
+
 	/* Load @regs to RAX. */
 	mov (%_ASM_SP), %_ASM_AX
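
In C terms, the new assembly performs roughly the following check, shown here as a hedged sketch (the function and stub names are illustrative, and WRMSR is stubbed out since the real instruction is privileged; the kernel keeps this logic in assembly precisely so that no compiler-generated RET can appear between the MSR write and vmentry):

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_SPEC_CTRL 0x48u   /* architectural MSR index */

/* Stand-in for the privileged WRMSR instruction. */
static void wrmsr_stub(uint32_t msr, uint32_t lo, uint32_t hi)
{
        printf("wrmsr(0x%x, lo=0x%x, hi=0x%x)\n", msr, lo, hi);
}

/*
 * Mirrors the cmp/je/wrmsr sequence above: write the MSR only when the
 * guest's value differs from the host's current value, then fall through
 * toward vmentry. No LFENCE is needed because vmentry itself is
 * serializing on affected CPUs.
 */
static void restore_guest_spec_ctrl(uint32_t guest, uint32_t host)
{
        if (guest != host)                        /* cmp %edi, %esi; je */
                wrmsr_stub(MSR_IA32_SPEC_CTRL, guest, 0); /* %edx zeroed */
}

int main(void)
{
        restore_guest_spec_ctrl(0x0, 0x5);  /* values differ: MSR written */
        restore_guest_spec_ctrl(0x5, 0x5);  /* values equal: write skipped */
        return 0;
}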

arch/x86/kvm/vmx/vmx.c

Lines changed: 0 additions & 8 deletions
@@ -6988,14 +6988,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	kvm_wait_lapic_expire(vcpu);
 
-	/*
-	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
-	 * is no need to worry about the conditional branch over the wrmsr
-	 * being speculatively taken.
-	 */
-	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
 	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
 	vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));

arch/x86/kvm/vmx/vmx.h

Lines changed: 2 additions & 2 deletions
@@ -8,11 +8,11 @@
 #include <asm/intel_pt.h>
 
 #include "capabilities.h"
-#include "kvm_cache_regs.h"
+#include "../kvm_cache_regs.h"
 #include "posted_intr.h"
 #include "vmcs.h"
 #include "vmx_ops.h"
-#include "cpuid.h"
+#include "../cpuid.h"
 #include "run_flags.h"
 
 #define MSR_TYPE_R 1

arch/x86/kvm/vmx/vmx_ops.h

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@
 
 #include "evmcs.h"
 #include "vmcs.h"
-#include "x86.h"
+#include "../x86.h"
 
 asmlinkage void vmread_error(unsigned long field, bool fault);
 __attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
