Commit 3f8df62

Merge tag 'kvm-x86-vmx-6.12' of https://github.com/kvm-x86/linux into HEAD
KVM VMX changes for 6.12:

 - Set FINAL/PAGE in the page fault error code for EPT Violations if and only
   if the GVA is valid. If the GVA is NOT valid, there is no guest-side page
   table walk and so stuffing paging related metadata is nonsensical.

 - Fix a bug where KVM would incorrectly synthesize a nested VM-Exit instead of
   emulating posted interrupt delivery to L2.

 - Add a lockdep assertion to detect unsafe accesses of vmcs12 structures.

 - Harden eVMCS loading against an impossible NULL pointer deref (really truly
   should be impossible).

 - Minor SGX fix and a cleanup.
2 parents: 55e6f8f + f300948

13 files changed: +124 −41 lines


Documentation/virt/kvm/api.rst

Lines changed: 19 additions & 4 deletions
@@ -4214,7 +4214,9 @@ whether or not KVM_CAP_X86_USER_SPACE_MSR's KVM_MSR_EXIT_REASON_FILTER is
 enabled. If KVM_MSR_EXIT_REASON_FILTER is enabled, KVM will exit to userspace
 on denied accesses, i.e. userspace effectively intercepts the MSR access. If
 KVM_MSR_EXIT_REASON_FILTER is not enabled, KVM will inject a #GP into the guest
-on denied accesses.
+on denied accesses. Note, if an MSR access is denied during emulation of MSR
+load/stores during VMX transitions, KVM ignores KVM_MSR_EXIT_REASON_FILTER.
+See the below warning for full details.
 
 If an MSR access is allowed by userspace, KVM will emulate and/or virtualize
 the access in accordance with the vCPU model. Note, KVM may still ultimately
@@ -4229,9 +4231,22 @@ filtering. In that mode, ``KVM_MSR_FILTER_DEFAULT_DENY`` is invalid and causes
 an error.
 
 .. warning::
-   MSR accesses as part of nested VM-Enter/VM-Exit are not filtered.
-   This includes both writes to individual VMCS fields and reads/writes
-   through the MSR lists pointed to by the VMCS.
+   MSR accesses that are side effects of instruction execution (emulated or
+   native) are not filtered as hardware does not honor MSR bitmaps outside of
+   RDMSR and WRMSR, and KVM mimics that behavior when emulating instructions
+   to avoid pointless divergence from hardware. E.g. RDPID reads MSR_TSC_AUX,
+   SYSENTER reads the SYSENTER MSRs, etc.
+
+   MSRs that are loaded/stored via dedicated VMCS fields are not filtered as
+   part of VM-Enter/VM-Exit emulation.
+
+   MSRs that are loaded/stored via VMX's load/store lists _are_ filtered as
+   part of VM-Enter/VM-Exit emulation. If an MSR access is denied on VM-Enter,
+   KVM synthesizes a consistency check VM-Exit (EXIT_REASON_MSR_LOAD_FAIL). If
+   an MSR access is denied on VM-Exit, KVM synthesizes a VM-Abort. In short,
+   KVM extends Intel's architectural list of MSRs that cannot be loaded/saved
+   via the VM-Enter/VM-Exit MSR list. It is the platform owner's responsibility
+   to communicate any such restrictions to their end users.
 
 x2APIC MSR accesses cannot be filtered (KVM silently ignores filters that
 cover any x2APIC MSRs).
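
As a point of reference for the behavior documented above, here is a minimal
userspace sketch (illustrative only, not part of this series; the helper name
and the choice of MSR_TSC_AUX are arbitrary) of installing such a filter via
KVM_X86_SET_MSR_FILTER. With KVM_MSR_EXIT_REASON_FILTER enabled, a denied WRMSR
executed by the guest exits to userspace, whereas the same denied write
performed through a VM-Enter MSR-load list is surfaced to L1 as an
EXIT_REASON_MSR_LOAD_FAIL consistency check VM-Exit, per the warning above.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Deny guest WRMSR to MSR_TSC_AUX (0xc0000103); all other MSR accesses keep
 * the default-allow policy.  A clear bit in the range bitmap means "denied". */
static int deny_guest_tsc_aux_writes(int vm_fd)
{
	__u8 bitmap[1] = { 0 };
	struct kvm_msr_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;	/* unlisted MSRs allowed */

	filter.ranges[0].flags  = KVM_MSR_FILTER_WRITE;	/* filter WRMSR only */
	filter.ranges[0].base   = 0xc0000103;		/* MSR_TSC_AUX */
	filter.ranges[0].nmsrs  = 1;
	filter.ranges[0].bitmap = bitmap;

	return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}

Because no read range is installed and the filter's default action is allow,
guest RDMSR of MSR_TSC_AUX and accesses to every other MSR are unaffected.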

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
@@ -2062,6 +2062,8 @@ void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
+int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
@@ -2264,6 +2266,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_cpu_has_extint(struct kvm_vcpu *v);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+int kvm_cpu_get_extint(struct kvm_vcpu *v);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);

arch/x86/kvm/irq.c

Lines changed: 7 additions & 3 deletions
@@ -108,7 +108,7 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
  * Read pending interrupt(from non-APIC source)
  * vector and intack.
  */
-static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+int kvm_cpu_get_extint(struct kvm_vcpu *v)
 {
 	if (!kvm_cpu_has_extint(v)) {
 		WARN_ON(!lapic_in_kernel(v));
@@ -131,6 +131,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
 	} else
 		return kvm_pic_read_irq(v->kvm); /* PIC */
 }
+EXPORT_SYMBOL_GPL(kvm_cpu_get_extint);
 
 /*
  * Read pending interrupt vector and intack.
@@ -141,9 +142,12 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 	if (vector != -1)
 		return vector; /* PIC */
 
-	return kvm_get_apic_interrupt(v); /* APIC */
+	vector = kvm_apic_has_interrupt(v); /* APIC */
+	if (vector != -1)
+		kvm_apic_ack_interrupt(v, vector);
+
+	return vector;
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {

arch/x86/kvm/lapic.c

Lines changed: 4 additions & 5 deletions
@@ -2959,14 +2959,13 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 	}
 }
 
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
+void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
 {
-	int vector = kvm_apic_has_interrupt(vcpu);
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u32 ppr;
 
-	if (vector == -1)
-		return -1;
+	if (WARN_ON_ONCE(vector < 0 || !apic))
+		return;
 
 	/*
 	 * We get here even with APIC virtualization enabled, if doing
@@ -2994,8 +2993,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 		__apic_update_ppr(apic, &ppr);
 	}
 
-	return vector;
 }
+EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);
 
 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 				struct kvm_lapic_state *s, bool set)

arch/x86/kvm/lapic.h

Lines changed: 1 addition & 1 deletion
@@ -88,8 +88,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu);
 void kvm_free_lapic(struct kvm_vcpu *vcpu);
 
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector);
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
-int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
 int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);

arch/x86/kvm/vmx/nested.c

Lines changed: 54 additions & 18 deletions
@@ -981,7 +981,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 				__func__, i, e.index, e.reserved);
 			goto fail;
 		}
-		if (kvm_set_msr(vcpu, e.index, e.value)) {
+		if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) {
 			pr_debug_ratelimited(
 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
 				__func__, i, e.index, e.value);
@@ -1017,7 +1017,7 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	if (kvm_get_msr(vcpu, msr_index, data)) {
+	if (kvm_get_msr_with_filter(vcpu, msr_index, data)) {
 		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
 				     msr_index);
 		return false;
@@ -1112,9 +1112,9 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
 		/*
 		 * Emulated VMEntry does not fail here. Instead a less
 		 * accurate value will be returned by
-		 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
-		 * instead of reading the value from the vmcs02 VMExit
-		 * MSR-store area.
+		 * nested_vmx_get_vmexit_msr_value() by reading KVM's
+		 * internal MSR state instead of reading the value from
+		 * the vmcs02 VMExit MSR-store area.
 		 */
 		pr_warn_ratelimited(
 			"Not enough msr entries in msr_autostore. Can't add msr %x\n",
@@ -2341,10 +2341,12 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
 
 	/* Posted interrupts setting is only taken from vmcs12. */
 	vmx->nested.pi_pending = false;
-	if (nested_cpu_has_posted_intr(vmcs12))
+	if (nested_cpu_has_posted_intr(vmcs12)) {
 		vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
-	else
+	} else {
+		vmx->nested.posted_intr_nv = -1;
 		exec_control &= ~PIN_BASED_POSTED_INTR;
+	}
 	pin_controls_set(vmx, exec_control);
 
 	/*
@@ -2494,6 +2496,7 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 
 	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
 			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+
 		vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
 		vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
 		vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
@@ -2531,7 +2534,7 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 		vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
 		vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
 
-		vmx->segment_cache.bitmask = 0;
+		vmx_segment_cache_clear(vmx);
 	}
 
 	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
@@ -4308,11 +4311,52 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
+		int irq;
+
 		if (block_nested_events)
 			return -EBUSY;
 		if (!nested_exit_on_intr(vcpu))
 			goto no_vmexit;
-		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+
+		if (!nested_exit_intr_ack_set(vcpu)) {
+			nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+			return 0;
+		}
+
+		irq = kvm_cpu_get_extint(vcpu);
+		if (irq != -1) {
+			nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
+					  INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
+			return 0;
+		}
+
+		irq = kvm_apic_has_interrupt(vcpu);
+		if (WARN_ON_ONCE(irq < 0))
+			goto no_vmexit;
+
+		/*
+		 * If the IRQ is L2's PI notification vector, process posted
+		 * interrupts for L2 instead of injecting VM-Exit, as the
+		 * detection/morphing architecturally occurs when the IRQ is
+		 * delivered to the CPU. Note, only interrupts that are routed
+		 * through the local APIC trigger posted interrupt processing,
+		 * and enabling posted interrupts requires ACK-on-exit.
+		 */
+		if (irq == vmx->nested.posted_intr_nv) {
+			vmx->nested.pi_pending = true;
+			kvm_apic_clear_irr(vcpu, irq);
+			goto no_vmexit;
+		}
+
+		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
+				  INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0);
+
+		/*
+		 * ACK the interrupt _after_ emulating VM-Exit, as the IRQ must
+		 * be marked as in-service in vmcs01.GUEST_INTERRUPT_STATUS.SVI
+		 * if APICv is active.
+		 */
+		kvm_apic_ack_interrupt(vcpu, irq);
 		return 0;
 	}
 
@@ -4830,7 +4874,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
 			goto vmabort;
 		}
 
-		if (kvm_set_msr(vcpu, h.index, h.value)) {
+		if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) {
 			pr_debug_ratelimited(
 				"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
 				__func__, j, h.index, h.value);
@@ -4993,14 +5037,6 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
 	if (likely(!vmx->fail)) {
-		if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
-		    nested_exit_intr_ack_set(vcpu)) {
-			int irq = kvm_cpu_get_interrupt(vcpu);
-			WARN_ON(irq < 0);
-			vmcs12->vm_exit_intr_info = irq |
-				INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
-		}
-
 		if (vm_exit_reason != -1)
 			trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
 						       vmcs12->exit_qualification,
vmcs12->exit_qualification,

arch/x86/kvm/vmx/nested.h

Lines changed: 6 additions & 0 deletions
@@ -39,11 +39,17 @@ bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
 
 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 {
+	lockdep_assert_once(lockdep_is_held(&vcpu->mutex) ||
+			    !refcount_read(&vcpu->kvm->users_count));
+
 	return to_vmx(vcpu)->nested.cached_vmcs12;
 }
 
 static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
 {
+	lockdep_assert_once(lockdep_is_held(&vcpu->mutex) ||
+			    !refcount_read(&vcpu->kvm->users_count));
+
 	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
 }
 

arch/x86/kvm/vmx/sgx.c

Lines changed: 1 addition & 1 deletion
@@ -274,7 +274,7 @@ static int handle_encls_ecreate(struct kvm_vcpu *vcpu)
 	 * simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to
 	 * enforce restriction of access to the PROVISIONKEY.
 	 */
-	contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT);
+	contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL);
 	if (!contents)
 		return -ENOMEM;
 

arch/x86/kvm/vmx/vmx.c

Lines changed: 11 additions & 6 deletions
@@ -525,10 +525,6 @@ static const struct kvm_vmx_segment_field {
 	VMX_SEGMENT_FIELD(LDTR),
 };
 
-static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
-{
-	vmx->segment_cache.bitmask = 0;
-}
 
 static unsigned long host_idt_base;
 
@@ -4219,6 +4215,13 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	/*
+	 * DO NOT query the vCPU's vmcs12, as vmcs12 is dynamically allocated
+	 * and freed, and must not be accessed outside of vcpu->mutex. The
+	 * vCPU's cached PI NV is valid if and only if posted interrupts
+	 * enabled in its vmcs12, i.e. checking the vector also checks that
+	 * L1 has enabled posted interrupts for L2.
+	 */
 	if (is_guest_mode(vcpu) &&
 	    vector == vmx->nested.posted_intr_nv) {
 		/*
@@ -5804,8 +5807,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
 		      ? PFERR_PRESENT_MASK : 0;
 
-	error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
-		      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+	if (error_code & EPT_VIOLATION_GVA_IS_VALID)
+		error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
+			      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
 
 	/*
 	 * Check that the GPA doesn't exceed physical memory limits, as that is
@@ -7969,6 +7973,7 @@ static __init void vmx_set_cpu_caps(void)
 		kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
 		kvm_cpu_cap_clear(X86_FEATURE_SGX1);
 		kvm_cpu_cap_clear(X86_FEATURE_SGX2);
+		kvm_cpu_cap_clear(X86_FEATURE_SGX_EDECCSSA);
 	}
 
 	if (vmx_umip_emulated())

arch/x86/kvm/vmx/vmx.h

Lines changed: 5 additions & 0 deletions
@@ -752,4 +752,9 @@ static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
 	return lapic_in_kernel(vcpu) && enable_ipiv;
 }
 
+static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
+{
+	vmx->segment_cache.bitmask = 0;
+}
+
 #endif /* __KVM_X86_VMX_H */

arch/x86/kvm/vmx/vmx_onhyperv.h

Lines changed: 8 additions & 0 deletions
@@ -104,6 +104,14 @@ static inline void evmcs_load(u64 phys_addr)
 	struct hv_vp_assist_page *vp_ap =
 		hv_get_vp_assist_page(smp_processor_id());
 
+	/*
+	 * When enabling eVMCS, KVM verifies that every CPU has a valid hv_vp_assist_page()
+	 * and aborts enabling the feature otherwise. CPU onlining path is also checked in
+	 * vmx_hardware_enable().
+	 */
+	if (KVM_BUG_ON(!vp_ap, kvm_get_running_vcpu()->kvm))
+		return;
+
 	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
 		vp_ap->nested_control.features.directhypercall = 1;
 	vp_ap->current_nested_vmcs = phys_addr;

arch/x86/kvm/vmx/vmx_ops.h

Lines changed: 1 addition & 1 deletion
@@ -47,7 +47,7 @@ static __always_inline void vmcs_check16(unsigned long field)
 	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
 			 "16-bit accessor invalid for 64-bit high field");
 	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
-			 "16-bit accessor invalid for 32-bit high field");
+			 "16-bit accessor invalid for 32-bit field");
 	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
 			 "16-bit accessor invalid for natural width field");
 }

arch/x86/kvm/x86.c

Lines changed: 4 additions & 2 deletions
@@ -1958,19 +1958,21 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
 			 __kvm_get_msr);
 }
 
-static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
 	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
 		return KVM_MSR_RET_FILTERED;
 	return kvm_get_msr_ignored_check(vcpu, index, data, false);
 }
+EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
 
-static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
 {
 	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
 		return KVM_MSR_RET_FILTERED;
 	return kvm_set_msr_ignored_check(vcpu, index, data, false);
 }
+EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter);
 
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
