Skip to content

Commit 609e36d

Browse files
committed
KVM: x86: pass host_initiated to functions that read MSRs
SMBASE is only readable from SMM for the VCPU, but it must be always accessible if userspace is accessing it. Thus, all functions that read MSRs are changed to accept a struct msr_data; the host_initiated and index fields are pre-initialized, while the data field is filled on return.

Reviewed-by: Radim Krčmář <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 62ef68b commit 609e36d

File tree

4 files changed

+127
-101
lines changed

4 files changed

+127
-101
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -721,7 +721,7 @@ struct kvm_x86_ops {
721721
void (*vcpu_put)(struct kvm_vcpu *vcpu);
722722

723723
void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
724-
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
724+
int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
725725
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
726726
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
727727
void (*get_segment)(struct kvm_vcpu *vcpu,
@@ -941,7 +941,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
941941

942942
void kvm_enable_efer_bits(u64);
943943
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
944-
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
944+
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
945945
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
946946

947947
struct x86_emulate_ctxt;
@@ -970,7 +970,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
970970
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
971971
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
972972

973-
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
973+
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
974974
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
975975

976976
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);

arch/x86/kvm/svm.c

Lines changed: 29 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -3069,91 +3069,95 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
30693069
svm_scale_tsc(vcpu, host_tsc);
30703070
}
30713071

3072-
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
3072+
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
30733073
{
30743074
struct vcpu_svm *svm = to_svm(vcpu);
30753075

3076-
switch (ecx) {
3076+
switch (msr_info->index) {
30773077
case MSR_IA32_TSC: {
3078-
*data = svm->vmcb->control.tsc_offset +
3078+
msr_info->data = svm->vmcb->control.tsc_offset +
30793079
svm_scale_tsc(vcpu, native_read_tsc());
30803080

30813081
break;
30823082
}
30833083
case MSR_STAR:
3084-
*data = svm->vmcb->save.star;
3084+
msr_info->data = svm->vmcb->save.star;
30853085
break;
30863086
#ifdef CONFIG_X86_64
30873087
case MSR_LSTAR:
3088-
*data = svm->vmcb->save.lstar;
3088+
msr_info->data = svm->vmcb->save.lstar;
30893089
break;
30903090
case MSR_CSTAR:
3091-
*data = svm->vmcb->save.cstar;
3091+
msr_info->data = svm->vmcb->save.cstar;
30923092
break;
30933093
case MSR_KERNEL_GS_BASE:
3094-
*data = svm->vmcb->save.kernel_gs_base;
3094+
msr_info->data = svm->vmcb->save.kernel_gs_base;
30953095
break;
30963096
case MSR_SYSCALL_MASK:
3097-
*data = svm->vmcb->save.sfmask;
3097+
msr_info->data = svm->vmcb->save.sfmask;
30983098
break;
30993099
#endif
31003100
case MSR_IA32_SYSENTER_CS:
3101-
*data = svm->vmcb->save.sysenter_cs;
3101+
msr_info->data = svm->vmcb->save.sysenter_cs;
31023102
break;
31033103
case MSR_IA32_SYSENTER_EIP:
3104-
*data = svm->sysenter_eip;
3104+
msr_info->data = svm->sysenter_eip;
31053105
break;
31063106
case MSR_IA32_SYSENTER_ESP:
3107-
*data = svm->sysenter_esp;
3107+
msr_info->data = svm->sysenter_esp;
31083108
break;
31093109
/*
31103110
* Nobody will change the following 5 values in the VMCB so we can
31113111
* safely return them on rdmsr. They will always be 0 until LBRV is
31123112
* implemented.
31133113
*/
31143114
case MSR_IA32_DEBUGCTLMSR:
3115-
*data = svm->vmcb->save.dbgctl;
3115+
msr_info->data = svm->vmcb->save.dbgctl;
31163116
break;
31173117
case MSR_IA32_LASTBRANCHFROMIP:
3118-
*data = svm->vmcb->save.br_from;
3118+
msr_info->data = svm->vmcb->save.br_from;
31193119
break;
31203120
case MSR_IA32_LASTBRANCHTOIP:
3121-
*data = svm->vmcb->save.br_to;
3121+
msr_info->data = svm->vmcb->save.br_to;
31223122
break;
31233123
case MSR_IA32_LASTINTFROMIP:
3124-
*data = svm->vmcb->save.last_excp_from;
3124+
msr_info->data = svm->vmcb->save.last_excp_from;
31253125
break;
31263126
case MSR_IA32_LASTINTTOIP:
3127-
*data = svm->vmcb->save.last_excp_to;
3127+
msr_info->data = svm->vmcb->save.last_excp_to;
31283128
break;
31293129
case MSR_VM_HSAVE_PA:
3130-
*data = svm->nested.hsave_msr;
3130+
msr_info->data = svm->nested.hsave_msr;
31313131
break;
31323132
case MSR_VM_CR:
3133-
*data = svm->nested.vm_cr_msr;
3133+
msr_info->data = svm->nested.vm_cr_msr;
31343134
break;
31353135
case MSR_IA32_UCODE_REV:
3136-
*data = 0x01000065;
3136+
msr_info->data = 0x01000065;
31373137
break;
31383138
default:
3139-
return kvm_get_msr_common(vcpu, ecx, data);
3139+
return kvm_get_msr_common(vcpu, msr_info);
31403140
}
31413141
return 0;
31423142
}
31433143

31443144
static int rdmsr_interception(struct vcpu_svm *svm)
31453145
{
31463146
u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3147-
u64 data;
3147+
struct msr_data msr_info;
31483148

3149-
if (svm_get_msr(&svm->vcpu, ecx, &data)) {
3149+
msr_info.index = ecx;
3150+
msr_info.host_initiated = false;
3151+
if (svm_get_msr(&svm->vcpu, &msr_info)) {
31503152
trace_kvm_msr_read_ex(ecx);
31513153
kvm_inject_gp(&svm->vcpu, 0);
31523154
} else {
3153-
trace_kvm_msr_read(ecx, data);
3155+
trace_kvm_msr_read(ecx, msr_info.data);
31543156

3155-
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
3156-
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
3157+
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3158+
msr_info.data & 0xffffffff);
3159+
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3160+
msr_info.data >> 32);
31573161
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
31583162
skip_emulated_instruction(&svm->vcpu);
31593163
}

arch/x86/kvm/vmx.c

Lines changed: 30 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -2622,76 +2622,69 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
26222622
* Returns 0 on success, non-0 otherwise.
26232623
* Assumes vcpu_load() was already called.
26242624
*/
2625-
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2625+
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
26262626
{
2627-
u64 data;
26282627
struct shared_msr_entry *msr;
26292628

2630-
if (!pdata) {
2631-
printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
2632-
return -EINVAL;
2633-
}
2634-
2635-
switch (msr_index) {
2629+
switch (msr_info->index) {
26362630
#ifdef CONFIG_X86_64
26372631
case MSR_FS_BASE:
2638-
data = vmcs_readl(GUEST_FS_BASE);
2632+
msr_info->data = vmcs_readl(GUEST_FS_BASE);
26392633
break;
26402634
case MSR_GS_BASE:
2641-
data = vmcs_readl(GUEST_GS_BASE);
2635+
msr_info->data = vmcs_readl(GUEST_GS_BASE);
26422636
break;
26432637
case MSR_KERNEL_GS_BASE:
26442638
vmx_load_host_state(to_vmx(vcpu));
2645-
data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
2639+
msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
26462640
break;
26472641
#endif
26482642
case MSR_EFER:
2649-
return kvm_get_msr_common(vcpu, msr_index, pdata);
2643+
return kvm_get_msr_common(vcpu, msr_info);
26502644
case MSR_IA32_TSC:
2651-
data = guest_read_tsc();
2645+
msr_info->data = guest_read_tsc();
26522646
break;
26532647
case MSR_IA32_SYSENTER_CS:
2654-
data = vmcs_read32(GUEST_SYSENTER_CS);
2648+
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
26552649
break;
26562650
case MSR_IA32_SYSENTER_EIP:
2657-
data = vmcs_readl(GUEST_SYSENTER_EIP);
2651+
msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
26582652
break;
26592653
case MSR_IA32_SYSENTER_ESP:
2660-
data = vmcs_readl(GUEST_SYSENTER_ESP);
2654+
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
26612655
break;
26622656
case MSR_IA32_BNDCFGS:
26632657
if (!vmx_mpx_supported())
26642658
return 1;
2665-
data = vmcs_read64(GUEST_BNDCFGS);
2659+
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
26662660
break;
26672661
case MSR_IA32_FEATURE_CONTROL:
26682662
if (!nested_vmx_allowed(vcpu))
26692663
return 1;
2670-
data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
2664+
msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
26712665
break;
26722666
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
26732667
if (!nested_vmx_allowed(vcpu))
26742668
return 1;
2675-
return vmx_get_vmx_msr(vcpu, msr_index, pdata);
2669+
return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
26762670
case MSR_IA32_XSS:
26772671
if (!vmx_xsaves_supported())
26782672
return 1;
2679-
data = vcpu->arch.ia32_xss;
2673+
msr_info->data = vcpu->arch.ia32_xss;
26802674
break;
26812675
case MSR_TSC_AUX:
26822676
if (!to_vmx(vcpu)->rdtscp_enabled)
26832677
return 1;
26842678
/* Otherwise falls through */
26852679
default:
2686-
msr = find_msr_entry(to_vmx(vcpu), msr_index);
2680+
msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
26872681
if (msr) {
2688-
data = msr->data;
2682+
msr_info->data = msr->data;
26892683
break;
26902684
}
2691-
return kvm_get_msr_common(vcpu, msr_index, pdata);
2685+
return kvm_get_msr_common(vcpu, msr_info);
26922686
}
26932687

2694-
*pdata = data;
26952688
return 0;
26962689
}
26972690

@@ -5473,19 +5466,21 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
54735466
static int handle_rdmsr(struct kvm_vcpu *vcpu)
54745467
{
54755468
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
5476-
u64 data;
5469+
struct msr_data msr_info;
54775470

5478-
if (vmx_get_msr(vcpu, ecx, &data)) {
5471+
msr_info.index = ecx;
5472+
msr_info.host_initiated = false;
5473+
if (vmx_get_msr(vcpu, &msr_info)) {
54795474
trace_kvm_msr_read_ex(ecx);
54805475
kvm_inject_gp(vcpu, 0);
54815476
return 1;
54825477
}
54835478

5484-
trace_kvm_msr_read(ecx, data);
5479+
trace_kvm_msr_read(ecx, msr_info.data);
54855480

54865481
/* FIXME: handling of bits 32:63 of rax, rdx */
5487-
vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
5488-
vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
5482+
vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
5483+
vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
54895484
skip_emulated_instruction(vcpu);
54905485
return 1;
54915486
}
@@ -9147,6 +9142,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
91479142
struct vmx_msr_entry e;
91489143

91499144
for (i = 0; i < count; i++) {
9145+
struct msr_data msr_info;
91509146
if (kvm_read_guest(vcpu->kvm,
91519147
gpa + i * sizeof(e),
91529148
&e, 2 * sizeof(u32))) {
@@ -9161,7 +9157,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
91619157
__func__, i, e.index, e.reserved);
91629158
return -EINVAL;
91639159
}
9164-
if (kvm_get_msr(vcpu, e.index, &e.value)) {
9160+
msr_info.host_initiated = false;
9161+
msr_info.index = e.index;
9162+
if (kvm_get_msr(vcpu, &msr_info)) {
91659163
pr_warn_ratelimited(
91669164
"%s cannot read MSR (%u, 0x%x)\n",
91679165
__func__, i, e.index);
@@ -9170,10 +9168,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
91709168
if (kvm_write_guest(vcpu->kvm,
91719169
gpa + i * sizeof(e) +
91729170
offsetof(struct vmx_msr_entry, value),
9173-
&e.value, sizeof(e.value))) {
9171+
&msr_info.data, sizeof(msr_info.data))) {
91749172
pr_warn_ratelimited(
91759173
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
9176-
__func__, i, e.index, e.value);
9174+
__func__, i, e.index, msr_info.data);
91779175
return -EINVAL;
91789176
}
91799177
}

0 commit comments

Comments
 (0)