Commit 8160a56

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Bugfixes, and a few cleanups to the newly-introduced assembly language
  vmentry code for AMD"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Handle non-present PTEs in page fault functions
  kvm: Disable objtool frame pointer checking for vmenter.S
  MAINTAINERS: add a reviewer for KVM/s390
  KVM: s390: Fix PV check in deliverable_irqs()
  kvm: Handle reads of SandyBridge RAPL PMU MSRs rather than injecting #GP
  KVM: Remove CREATE_IRQCHIP/SET_PIT2 race
  KVM: SVM: Fix __svm_vcpu_run declaration.
  KVM: SVM: Do not setup frame pointer in __svm_vcpu_run
  KVM: SVM: Fix build error due to missing release_pages() include
  KVM: SVM: Do not mark svm_vcpu_run with STACK_FRAME_NON_STANDARD
  kvm: nVMX: match comment with return type for nested_vmx_exit_reflected
  kvm: nVMX: reflect MTF VM-exits if injected by L1
  KVM: s390: Return last valid slot if approx index is out-of-bounds
  KVM: Check validity of resolved slot when searching memslots
  KVM: VMX: Enable machine check support for 32bit targets
  KVM: SVM: move more vmentry code to assembly
  KVM: SVM: fix compilation with modular PSP and non-modular KVM

2 parents 189522d + 00a6a5e, commit 8160a56

14 files changed: +74, -47 lines

MAINTAINERS

Lines changed: 1 addition & 0 deletions

@@ -9329,6 +9329,7 @@ M: Christian Borntraeger <[email protected]>
 M: Janosch Frank <[email protected]>
 R: David Hildenbrand <[email protected]>
 R: Cornelia Huck <[email protected]>
+R: Claudio Imbrenda <[email protected]>
 L: [email protected]
 S: Supported
 W: http://www.ibm.com/developerworks/linux/linux390/

arch/powerpc/kvm/book3s_64_mmu_hv.c

Lines changed: 5 additions & 4 deletions

@@ -604,18 +604,19 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
          */
         local_irq_disable();
         ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+        pte = __pte(0);
+        if (ptep)
+                pte = *ptep;
+        local_irq_enable();
         /*
          * If the PTE disappeared temporarily due to a THP
          * collapse, just return and let the guest try again.
          */
-        if (!ptep) {
-                local_irq_enable();
+        if (!pte_present(pte)) {
                 if (page)
                         put_page(page);
                 return RESUME_GUEST;
         }
-        pte = *ptep;
-        local_irq_enable();
         hpa = pte_pfn(pte) << PAGE_SHIFT;
         pte_size = PAGE_SIZE;
         if (shift)

arch/powerpc/kvm/book3s_64_mmu_radix.c

Lines changed: 5 additions & 4 deletions

@@ -815,18 +815,19 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
          */
         local_irq_disable();
         ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+        pte = __pte(0);
+        if (ptep)
+                pte = *ptep;
+        local_irq_enable();
         /*
          * If the PTE disappeared temporarily due to a THP
          * collapse, just return and let the guest try again.
          */
-        if (!ptep) {
-                local_irq_enable();
+        if (!pte_present(pte)) {
                 if (page)
                         put_page(page);
                 return RESUME_GUEST;
         }
-        pte = *ptep;
-        local_irq_enable();

         /* If we're logging dirty pages, always map single pages */
         large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
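
Both PPC hunks apply the same pattern: snapshot the PTE while interrupts are disabled, re-enable interrupts exactly once, and test the snapshot with pte_present(). The old code only checked that the pointer was non-NULL, so a PTE that still existed but had its present bit cleared (as can happen mid THP collapse) slipped through. A minimal userspace sketch of the pattern, using a mutex in place of local_irq_disable() and hypothetical names:

    #include <pthread.h>
    #include <stdbool.h>

    typedef unsigned long pte_t;
    #define PTE_PRESENT 0x1UL

    static pthread_mutex_t walk_lock = PTHREAD_MUTEX_INITIALIZER;
    static pte_t *pte_slot;              /* may be NULL, or point to a PTE
                                          * whose present bit is clear */

    static bool lookup_pte(pte_t *out)
    {
            pte_t pte = 0;               /* __pte(0) stand-in: never present */

            pthread_mutex_lock(&walk_lock);   /* local_irq_disable() in the patch */
            if (pte_slot)
                    pte = *pte_slot;          /* snapshot while protected */
            pthread_mutex_unlock(&walk_lock); /* single local_irq_enable() */

            if (!(pte & PTE_PRESENT))    /* covers NULL and non-present alike */
                    return false;        /* caller returns RESUME_GUEST */

            *out = pte;
            return true;
    }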

arch/s390/kvm/interrupt.c

Lines changed: 1 addition & 1 deletion

@@ -393,7 +393,7 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
         if (psw_mchk_disabled(vcpu))
                 active_mask &= ~IRQ_PEND_MCHK_MASK;
         /* PV guest cpus can have a single interruption injected at a time. */
-        if (kvm_s390_pv_cpu_is_protected(vcpu) &&
+        if (kvm_s390_pv_cpu_get_handle(vcpu) &&
             vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
                 active_mask &= ~(IRQ_PEND_EXT_II_MASK |
                                  IRQ_PEND_IO_MASK |

arch/s390/kvm/kvm-s390.c

Lines changed: 3 additions & 0 deletions

@@ -1939,6 +1939,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
                         start = slot + 1;
         }

+        if (start >= slots->used_slots)
+                return slots->used_slots - 1;
+
         if (gfn >= memslots[start].base_gfn &&
             gfn < memslots[start].base_gfn + memslots[start].npages) {
                 atomic_set(&slots->lru_slot, start);
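
gfn_to_memslot_approx() deliberately returns the nearest slot rather than failing, so when the binary search runs off the end of the used slots the index must be clamped rather than dereferenced. A self-contained sketch of the lookup (simplified signature and field names; KVM's memslots are sorted by base_gfn in descending order):

    typedef unsigned long long gfn_t;

    struct memslot { gfn_t base_gfn; gfn_t npages; };

    /* Binary-search for the first slot with base_gfn <= gfn.  If gfn lies
     * below every slot, start ends up == used_slots; the new clamp returns
     * the last valid index instead of reading past the array. */
    static int gfn_to_memslot_approx(struct memslot *slots, int used_slots, gfn_t gfn)
    {
            int start = 0, end = used_slots;

            while (start < end) {
                    int slot = start + (end - start) / 2;

                    if (gfn >= slots[slot].base_gfn)
                            end = slot;
                    else
                            start = slot + 1;
            }

            if (start >= used_slots)
                    return used_slots - 1;

            return start;
    }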

arch/x86/include/asm/nospec-branch.h

Lines changed: 0 additions & 21 deletions

@@ -237,27 +237,6 @@ enum ssb_mitigation {
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];

-/*
- * On VMEXIT we must ensure that no RSB predictions learned in the guest
- * can be followed in the host, by overwriting the RSB completely. Both
- * retpoline and IBRS mitigations for Spectre v2 need this; only on future
- * CPUs with IBRS_ALL *might* it be avoided.
- */
-static inline void vmexit_fill_RSB(void)
-{
-#ifdef CONFIG_RETPOLINE
-        unsigned long loops;
-
-        asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-                      ALTERNATIVE("jmp 910f",
-                                  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-                                  X86_FEATURE_RETPOLINE)
-                      "910:"
-                      : "=r" (loops), ASM_CALL_CONSTRAINT
-                      : : "memory" );
-#endif
-}
-
 static __always_inline
 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 {

arch/x86/kvm/Makefile

Lines changed: 4 additions & 0 deletions

@@ -3,6 +3,10 @@
 ccflags-y += -Iarch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror

+ifeq ($(CONFIG_FRAME_POINTER),y)
+OBJECT_FILES_NON_STANDARD_vmenter.o := y
+endif
+
 KVM := ../../../virt/kvm

 kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \

arch/x86/kvm/svm/sev.c

Lines changed: 5 additions & 1 deletion

@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/highmem.h>
 #include <linux/psp-sev.h>
+#include <linux/pagemap.h>
 #include <linux/swap.h>

 #include "x86.h"
@@ -1117,7 +1118,7 @@ int __init sev_hardware_setup(void)
         /* Maximum number of encrypted guests supported simultaneously */
         max_sev_asid = cpuid_ecx(0x8000001F);

-        if (!max_sev_asid)
+        if (!svm_sev_enabled())
                 return 1;

         /* Minimum ASID value that should be used for SEV guest */
@@ -1156,6 +1157,9 @@ int __init sev_hardware_setup(void)

 void sev_hardware_teardown(void)
 {
+        if (!svm_sev_enabled())
+                return;
+
         bitmap_free(sev_asid_bitmap);
         bitmap_free(sev_reclaim_asid_bitmap);
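
The teardown guard mirrors the early-out in sev_hardware_setup(): if SEV never came up, the ASID bitmaps were never allocated, and (as the merged commit's subject about modular PSP suggests) svm_sev_enabled() folds to a constant when SEV support is compiled out. A toy model of the setup/teardown pairing, with hypothetical names:

    #include <stdlib.h>

    static unsigned long *asid_bitmap;    /* allocated only when enabled */

    static int sev_enabled(void)          /* stands in for svm_sev_enabled() */
    {
            return asid_bitmap != NULL;
    }

    static int setup(int supported)
    {
            if (!supported)
                    return 1;             /* bail before any allocation */
            asid_bitmap = calloc(16, sizeof(*asid_bitmap));
            return asid_bitmap ? 0 : 1;
    }

    static void teardown(void)
    {
            if (!sev_enabled())
                    return;               /* setup never ran to completion */
            free(asid_bitmap);
            asid_bitmap = NULL;
    }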

arch/x86/kvm/svm/svm.c

Lines changed: 1 addition & 9 deletions

@@ -3276,7 +3276,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
         svm_complete_interrupts(svm);
 }

-bool __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
+void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
@@ -3330,13 +3330,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
          */
         x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

-        local_irq_enable();
-
         __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);

-        /* Eliminate branch target predictions from guest mode */
-        vmexit_fill_RSB();
-
 #ifdef CONFIG_X86_64
         wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
@@ -3366,8 +3361,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)

         reload_tss(vcpu);

-        local_irq_disable();
-
         x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

         vcpu->arch.cr2 = svm->vmcb->save.cr2;
@@ -3411,7 +3404,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)

         mark_all_clean(svm->vmcb);
 }
-STACK_FRAME_NON_STANDARD(svm_vcpu_run);

 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
 {

arch/x86/kvm/svm/vmenter.S

Lines changed: 9 additions & 1 deletion

@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
+#include <asm/nospec-branch.h>

 #define WORD_SIZE (BITS_PER_LONG / 8)

@@ -35,7 +36,6 @@
  */
 SYM_FUNC_START(__svm_vcpu_run)
         push %_ASM_BP
-        mov  %_ASM_SP, %_ASM_BP
 #ifdef CONFIG_X86_64
         push %r15
         push %r14
@@ -78,6 +78,7 @@ SYM_FUNC_START(__svm_vcpu_run)
         pop %_ASM_AX

         /* Enter guest mode */
+        sti
 1:      vmload %_ASM_AX
         jmp 3f
 2:      cmpb $0, kvm_rebooting
@@ -99,6 +100,13 @@ SYM_FUNC_START(__svm_vcpu_run)
         ud2
         _ASM_EXTABLE(5b, 6b)
 7:
+        cli
+
+#ifdef CONFIG_RETPOLINE
+        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
         /* "POP" @regs to RAX. */
         pop %_ASM_AX

arch/x86/kvm/vmx/nested.c

Lines changed: 19 additions & 2 deletions

@@ -5533,8 +5533,25 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
         return 1 & (b >> (field & 7));
 }

+static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
+{
+        u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
+
+        if (nested_cpu_has_mtf(vmcs12))
+                return true;
+
+        /*
+         * An MTF VM-exit may be injected into the guest by setting the
+         * interruption-type to 7 (other event) and the vector field to 0. Such
+         * is the case regardless of the 'monitor trap flag' VM-execution
+         * control.
+         */
+        return entry_intr_info == (INTR_INFO_VALID_MASK
+                                   | INTR_TYPE_OTHER_EVENT);
+}
+
 /*
- * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * Return true if we should exit from L2 to L1 to handle an exit, or false if we
  * should handle it ourselves in L0 (and then continue L2). Only call this
  * when in is_guest_mode (L2).
  */
@@ -5633,7 +5650,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
         case EXIT_REASON_MWAIT_INSTRUCTION:
                 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
         case EXIT_REASON_MONITOR_TRAP_FLAG:
-                return nested_cpu_has_mtf(vmcs12);
+                return nested_vmx_exit_handled_mtf(vmcs12);
         case EXIT_REASON_MONITOR_INSTRUCTION:
                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
         case EXIT_REASON_PAUSE_INSTRUCTION:
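
The encoding the new helper tests comes from the VM-entry interruption-information field: bit 31 is the valid bit and bits 10:8 are the event type, where type 7 means "other event" and a zero vector then denotes a pending MTF. A small sketch of the predicate (constant values restated here for illustration, per the Intel SDM):

    #include <assert.h>
    #include <stdint.h>

    #define INTR_INFO_VALID_MASK  (1u << 31)  /* bit 31: information valid */
    #define INTR_TYPE_OTHER_EVENT (7u << 8)   /* bits 10:8 = 7: other event */

    /* Matches the fallback in nested_vmx_exit_handled_mtf(): an injected MTF
     * is "valid | other event" with a zero vector, regardless of whether the
     * 'monitor trap flag' execution control is set. */
    static int is_injected_mtf(uint32_t entry_intr_info)
    {
            return entry_intr_info == (INTR_INFO_VALID_MASK | INTR_TYPE_OTHER_EVENT);
    }

    int main(void)
    {
            assert(is_injected_mtf(0x80000700u));  /* valid, type 7, vector 0 */
            assert(!is_injected_mtf(0x80000306u)); /* hardware exception (#UD) */
            return 0;
    }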

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion

@@ -4572,7 +4572,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
  */
 static void kvm_machine_check(void)
 {
-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_MCE)
         struct pt_regs regs = {
                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
                 .flags = X86_EFLAGS_IF,

arch/x86/kvm/x86.c

Lines changed: 19 additions & 2 deletions

@@ -3060,6 +3060,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         case MSR_IA32_PERF_CTL:
         case MSR_AMD64_DC_CFG:
         case MSR_F15H_EX_CFG:
+        /*
+         * Intel Sandy Bridge CPUs must support the RAPL (running average power
+         * limit) MSRs. Just return 0, as we do not want to expose the host
+         * data here. Do not conditionalize this on CPUID, as KVM does not do
+         * so for existing CPU-specific MSRs.
+         */
+        case MSR_RAPL_POWER_UNIT:
+        case MSR_PP0_ENERGY_STATUS:     /* Power plane 0 (core) */
+        case MSR_PP1_ENERGY_STATUS:     /* Power plane 1 (graphics uncore) */
+        case MSR_PKG_ENERGY_STATUS:     /* Total package */
+        case MSR_DRAM_ENERGY_STATUS:    /* DRAM controller */
                 msr_info->data = 0;
                 break;
         case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
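
The practical effect: guest software that reads the RAPL MSRs directly (guest kernels probing the RAPL PMU, tools like turbostat) now sees zeroed energy counters instead of an injected #GP. A userspace sketch of the kind of read such software performs, via the Linux msr driver (an illustration, not KVM code; requires root and modprobe msr; 0x611 is MSR_PKG_ENERGY_STATUS):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/cpu/0/msr");
                    return 1;
            }
            /* for the msr driver, the file offset selects the MSR number */
            if (pread(fd, &val, sizeof(val), 0x611) != sizeof(val)) {
                    perror("MSR_PKG_ENERGY_STATUS");  /* a #GP surfaces as EIO */
                    close(fd);
                    return 1;
            }
            printf("MSR_PKG_ENERGY_STATUS: %#llx\n", (unsigned long long)val);
            close(fd);
            return 0;
    }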
@@ -5049,10 +5060,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
                 r = -EFAULT;
                 if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
                         goto out;
+                mutex_lock(&kvm->lock);
                 r = -ENXIO;
                 if (!kvm->arch.vpit)
-                        goto out;
+                        goto set_pit_out;
                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
+set_pit_out:
+                mutex_unlock(&kvm->lock);
                 break;
         }
         case KVM_GET_PIT2: {
@@ -5072,10 +5086,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
                 r = -EFAULT;
                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
                         goto out;
+                mutex_lock(&kvm->lock);
                 r = -ENXIO;
                 if (!kvm->arch.vpit)
-                        goto out;
+                        goto set_pit2_out;
                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
+set_pit2_out:
+                mutex_unlock(&kvm->lock);
                 break;
         }
         case KVM_REINJECT_CONTROL: {
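
Both KVM_SET_PIT and KVM_SET_PIT2 had the same flaw: the kvm->arch.vpit existence check and the subsequent PIT update were not serialized against KVM_CREATE_IRQCHIP, which runs under kvm->lock, so a PIT timer could be armed while the irqchip was still being set up. Taking kvm->lock across check and use closes the window. A toy model of the fixed shape, using a pthread mutex in place of kvm->lock:

    #include <pthread.h>
    #include <stddef.h>

    struct pit { int state; };

    static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct pit *vpit;             /* created elsewhere, under vm_lock */

    static int set_pit2(int state)
    {
            int r = -1;                  /* -ENXIO in the real ioctl */

            pthread_mutex_lock(&vm_lock);
            if (!vpit)
                    goto set_pit2_out;   /* device absent: fail under the lock */
            vpit->state = state;         /* vpit cannot change under us here */
            r = 0;
    set_pit2_out:
            pthread_mutex_unlock(&vm_lock);
            return r;
    }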

include/linux/kvm_host.h

Lines changed: 1 addition & 1 deletion

@@ -1048,7 +1048,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
                         start = slot + 1;
         }

-        if (gfn >= memslots[start].base_gfn &&
+        if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
             gfn < memslots[start].base_gfn + memslots[start].npages) {
                 atomic_set(&slots->lru_slot, start);
                 return &memslots[start];
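
The generic lookup gets the same guard as the s390 approximation above, except that a miss must return NULL rather than a clamped index: without the start < slots->used_slots test, a gfn below every slot's base_gfn leaves the search at used_slots and the range check reads one element past the used range. A self-contained sketch (same simplified types as the gfn_to_memslot_approx example):

    #include <stddef.h>

    typedef unsigned long long gfn_t;

    struct memslot { gfn_t base_gfn; gfn_t npages; };  /* sorted descending */

    static struct memslot *search_memslots(struct memslot *slots, int used_slots,
                                           gfn_t gfn)
    {
            int start = 0, end = used_slots;

            while (start < end) {
                    int slot = start + (end - start) / 2;

                    if (gfn >= slots[slot].base_gfn)
                            end = slot;
                    else
                            start = slot + 1;
            }

            /* the new bounds check keeps a miss from indexing past the array */
            if (start < used_slots && gfn >= slots[start].base_gfn &&
                gfn < slots[start].base_gfn + slots[start].npages)
                    return &slots[start];

            return NULL;                 /* no slot contains gfn */
    }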
