Skip to content

Commit fe09396

Browse files
bonzini authored and Brian Maly committed
KVM: x86: pass host_initiated to functions that read MSRs
SMBASE is only readable from SMM for the VCPU, but it must always be accessible if userspace is accessing it. Thus, all functions that read MSRs are changed to accept a struct msr_data; the host_initiated and index fields are pre-initialized, while the data field is filled on return.

Reviewed-by: Radim Krčmář <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
(cherry picked from commit 609e36d)

Orabug: 28069548

Signed-off-by: Mihai Carabas <[email protected]>
Reviewed-by: Darren Kenny <[email protected]>
Reviewed-by: Boris Ostrovsky <[email protected]>
Signed-off-by: Brian Maly <[email protected]>
1 parent b3bd557 commit fe09396

File tree

4 files changed

+131
-105
lines changed

4 files changed

+131
-105
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -729,7 +729,7 @@ struct kvm_x86_ops {
729729
void (*vcpu_put)(struct kvm_vcpu *vcpu);
730730

731731
void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
732-
int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
732+
int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
733733
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
734734
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
735735
void (*get_segment)(struct kvm_vcpu *vcpu,
@@ -951,7 +951,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
951951

952952
void kvm_enable_efer_bits(u64);
953953
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
954-
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
954+
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
955955
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
956956

957957
struct x86_emulate_ctxt;
@@ -980,7 +980,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
980980
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
981981
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
982982

983-
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
983+
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
984984
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
985985

986986
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);

arch/x86/kvm/svm.c

Lines changed: 31 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -3112,97 +3112,101 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
31123112
return 1;
31133113
}
31143114

3115-
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
3115+
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
31163116
{
31173117
struct vcpu_svm *svm = to_svm(vcpu);
31183118

3119-
switch (ecx) {
3119+
switch (msr_info->index) {
31203120
case MSR_IA32_TSC: {
3121-
*data = svm->vmcb->control.tsc_offset +
3121+
msr_info->data = svm->vmcb->control.tsc_offset +
31223122
svm_scale_tsc(vcpu, native_read_tsc());
31233123

31243124
break;
31253125
}
31263126
case MSR_STAR:
3127-
*data = svm->vmcb->save.star;
3127+
msr_info->data = svm->vmcb->save.star;
31283128
break;
31293129
#ifdef CONFIG_X86_64
31303130
case MSR_LSTAR:
3131-
*data = svm->vmcb->save.lstar;
3131+
msr_info->data = svm->vmcb->save.lstar;
31323132
break;
31333133
case MSR_CSTAR:
3134-
*data = svm->vmcb->save.cstar;
3134+
msr_info->data = svm->vmcb->save.cstar;
31353135
break;
31363136
case MSR_KERNEL_GS_BASE:
3137-
*data = svm->vmcb->save.kernel_gs_base;
3137+
msr_info->data = svm->vmcb->save.kernel_gs_base;
31383138
break;
31393139
case MSR_SYSCALL_MASK:
3140-
*data = svm->vmcb->save.sfmask;
3140+
msr_info->data = svm->vmcb->save.sfmask;
31413141
break;
31423142
#endif
31433143
case MSR_IA32_SYSENTER_CS:
3144-
*data = svm->vmcb->save.sysenter_cs;
3144+
msr_info->data = svm->vmcb->save.sysenter_cs;
31453145
break;
31463146
case MSR_IA32_SYSENTER_EIP:
3147-
*data = svm->sysenter_eip;
3147+
msr_info->data = svm->sysenter_eip;
31483148
break;
31493149
case MSR_IA32_SYSENTER_ESP:
3150-
*data = svm->sysenter_esp;
3150+
msr_info->data = svm->sysenter_esp;
31513151
break;
31523152
/*
31533153
* Nobody will change the following 5 values in the VMCB so we can
31543154
* safely return them on rdmsr. They will always be 0 until LBRV is
31553155
* implemented.
31563156
*/
31573157
case MSR_IA32_DEBUGCTLMSR:
3158-
*data = svm->vmcb->save.dbgctl;
3158+
msr_info->data = svm->vmcb->save.dbgctl;
31593159
break;
31603160
case MSR_IA32_LASTBRANCHFROMIP:
3161-
*data = svm->vmcb->save.br_from;
3161+
msr_info->data = svm->vmcb->save.br_from;
31623162
break;
31633163
case MSR_IA32_LASTBRANCHTOIP:
3164-
*data = svm->vmcb->save.br_to;
3164+
msr_info->data = svm->vmcb->save.br_to;
31653165
break;
31663166
case MSR_IA32_LASTINTFROMIP:
3167-
*data = svm->vmcb->save.last_excp_from;
3167+
msr_info->data = svm->vmcb->save.last_excp_from;
31683168
break;
31693169
case MSR_IA32_LASTINTTOIP:
3170-
*data = svm->vmcb->save.last_excp_to;
3170+
msr_info->data = svm->vmcb->save.last_excp_to;
31713171
break;
31723172
case MSR_VM_HSAVE_PA:
3173-
*data = svm->nested.hsave_msr;
3173+
msr_info->data = svm->nested.hsave_msr;
31743174
break;
31753175
case MSR_VM_CR:
3176-
*data = svm->nested.vm_cr_msr;
3176+
msr_info->data = svm->nested.vm_cr_msr;
31773177
break;
31783178
case MSR_IA32_SPEC_CTRL:
3179-
*data = svm->spec_ctrl;
3179+
msr_info->data = svm->spec_ctrl;
31803180
break;
31813181
case MSR_AMD64_VIRT_SPEC_CTRL:
3182-
*data = svm->virt_spec_ctrl;
3182+
msr_info->data = svm->virt_spec_ctrl;
31833183
break;
31843184
case MSR_IA32_UCODE_REV:
3185-
*data = 0x01000065;
3185+
msr_info->data = 0x01000065;
31863186
break;
31873187
default:
3188-
return kvm_get_msr_common(vcpu, ecx, data);
3188+
return kvm_get_msr_common(vcpu, msr_info);
31893189
}
31903190
return 0;
31913191
}
31923192

31933193
static int rdmsr_interception(struct vcpu_svm *svm)
31943194
{
31953195
u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3196-
u64 data;
3196+
struct msr_data msr_info;
31973197

3198-
if (svm_get_msr(&svm->vcpu, ecx, &data)) {
3198+
msr_info.index = ecx;
3199+
msr_info.host_initiated = false;
3200+
if (svm_get_msr(&svm->vcpu, &msr_info)) {
31993201
trace_kvm_msr_read_ex(ecx);
32003202
kvm_inject_gp(&svm->vcpu, 0);
32013203
} else {
3202-
trace_kvm_msr_read(ecx, data);
3204+
trace_kvm_msr_read(ecx, msr_info.data);
32033205

3204-
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
3205-
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
3206+
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3207+
msr_info.data & 0xffffffff);
3208+
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3209+
msr_info.data >> 32);
32063210
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
32073211
skip_emulated_instruction(&svm->vcpu);
32083212
}

arch/x86/kvm/vmx.c

Lines changed: 32 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -2808,82 +2808,75 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
28082808
* Returns 0 on success, non-0 otherwise.
28092809
* Assumes vcpu_load() was already called.
28102810
*/
2811-
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2811+
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
28122812
{
2813-
u64 data;
28142813
struct shared_msr_entry *msr;
28152814

2816-
if (!pdata) {
2817-
printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
2818-
return -EINVAL;
2819-
}
2820-
2821-
switch (msr_index) {
2815+
switch (msr_info->index) {
28222816
#ifdef CONFIG_X86_64
28232817
case MSR_FS_BASE:
2824-
data = vmcs_readl(GUEST_FS_BASE);
2818+
msr_info->data = vmcs_readl(GUEST_FS_BASE);
28252819
break;
28262820
case MSR_GS_BASE:
2827-
data = vmcs_readl(GUEST_GS_BASE);
2821+
msr_info->data = vmcs_readl(GUEST_GS_BASE);
28282822
break;
28292823
case MSR_KERNEL_GS_BASE:
28302824
vmx_load_host_state(to_vmx(vcpu));
2831-
data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
2825+
msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
28322826
break;
28332827
#endif
28342828
case MSR_EFER:
2835-
return kvm_get_msr_common(vcpu, msr_index, pdata);
2829+
return kvm_get_msr_common(vcpu, msr_info);
28362830
case MSR_IA32_TSC:
2837-
data = guest_read_tsc();
2831+
msr_info->data = guest_read_tsc();
28382832
break;
28392833
case MSR_IA32_SPEC_CTRL:
2840-
data = to_vmx(vcpu)->spec_ctrl;
2834+
msr_info->data = to_vmx(vcpu)->spec_ctrl;
28412835
break;
28422836
case MSR_IA32_ARCH_CAPABILITIES:
2843-
data = to_vmx(vcpu)->arch_capabilities;
2837+
msr_info->data = to_vmx(vcpu)->arch_capabilities;
28442838
break;
28452839
case MSR_IA32_SYSENTER_CS:
2846-
data = vmcs_read32(GUEST_SYSENTER_CS);
2840+
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
28472841
break;
28482842
case MSR_IA32_SYSENTER_EIP:
2849-
data = vmcs_readl(GUEST_SYSENTER_EIP);
2843+
msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
28502844
break;
28512845
case MSR_IA32_SYSENTER_ESP:
2852-
data = vmcs_readl(GUEST_SYSENTER_ESP);
2846+
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
28532847
break;
28542848
case MSR_IA32_BNDCFGS:
28552849
if (!vmx_mpx_supported())
28562850
return 1;
2857-
data = vmcs_read64(GUEST_BNDCFGS);
2851+
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
28582852
break;
28592853
case MSR_IA32_FEATURE_CONTROL:
28602854
if (!nested_vmx_allowed(vcpu))
28612855
return 1;
2862-
data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
2856+
msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
28632857
break;
28642858
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
28652859
if (!nested_vmx_allowed(vcpu))
28662860
return 1;
2867-
return vmx_get_vmx_msr(vcpu, msr_index, pdata);
2861+
return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
28682862
case MSR_IA32_XSS:
28692863
if (!vmx_xsaves_supported())
28702864
return 1;
2871-
data = vcpu->arch.ia32_xss;
2865+
msr_info->data = vcpu->arch.ia32_xss;
28722866
break;
28732867
case MSR_TSC_AUX:
28742868
if (!to_vmx(vcpu)->rdtscp_enabled)
28752869
return 1;
28762870
/* Otherwise falls through */
28772871
default:
2878-
msr = find_msr_entry(to_vmx(vcpu), msr_index);
2872+
msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
28792873
if (msr) {
2880-
data = msr->data;
2874+
msr_info->data = msr->data;
28812875
break;
28822876
}
2883-
return kvm_get_msr_common(vcpu, msr_index, pdata);
2877+
return kvm_get_msr_common(vcpu, msr_info);
28842878
}
28852879

2886-
*pdata = data;
28872880
return 0;
28882881
}
28892882

@@ -5787,19 +5780,21 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
57875780
static int handle_rdmsr(struct kvm_vcpu *vcpu)
57885781
{
57895782
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
5790-
u64 data;
5783+
struct msr_data msr_info;
57915784

5792-
if (vmx_get_msr(vcpu, ecx, &data)) {
5785+
msr_info.index = ecx;
5786+
msr_info.host_initiated = false;
5787+
if (vmx_get_msr(vcpu, &msr_info)) {
57935788
trace_kvm_msr_read_ex(ecx);
57945789
kvm_inject_gp(vcpu, 0);
57955790
return 1;
57965791
}
57975792

5798-
trace_kvm_msr_read(ecx, data);
5793+
trace_kvm_msr_read(ecx, msr_info.data);
57995794

58005795
/* FIXME: handling of bits 32:63 of rax, rdx */
5801-
vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
5802-
vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
5796+
vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
5797+
vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
58035798
skip_emulated_instruction(vcpu);
58045799
return 1;
58055800
}
@@ -9289,6 +9284,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
92899284
struct vmx_msr_entry e;
92909285

92919286
for (i = 0; i < count; i++) {
9287+
struct msr_data msr_info;
92929288
if (kvm_read_guest(vcpu->kvm,
92939289
gpa + i * sizeof(e),
92949290
&e, 2 * sizeof(u32))) {
@@ -9303,7 +9299,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
93039299
__func__, i, e.index, e.reserved);
93049300
return -EINVAL;
93059301
}
9306-
if (kvm_get_msr(vcpu, e.index, &e.value)) {
9302+
msr_info.host_initiated = false;
9303+
msr_info.index = e.index;
9304+
if (kvm_get_msr(vcpu, &msr_info)) {
93079305
pr_warn_ratelimited(
93089306
"%s cannot read MSR (%u, 0x%x)\n",
93099307
__func__, i, e.index);
@@ -9312,10 +9310,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
93129310
if (kvm_write_guest(vcpu->kvm,
93139311
gpa + i * sizeof(e) +
93149312
offsetof(struct vmx_msr_entry, value),
9315-
&e.value, sizeof(e.value))) {
9313+
&msr_info.data, sizeof(msr_info.data))) {
93169314
pr_warn_ratelimited(
93179315
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
9318-
__func__, i, e.index, e.value);
9316+
__func__, i, e.index, msr_info.data);
93199317
return -EINVAL;
93209318
}
93219319
}

0 commit comments

Comments (0)