
Commit cd802e7

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "ARM:

   - Avoid use of uninitialized memcache pointer in user_mem_abort()

   - Always set HCR_EL2.xMO bits when running in VHE, allowing interrupts
     to be taken while TGE=0 and fixing an ugly bug on AmpereOne that
     occurs when taking an interrupt while clearing the xMO bits
     (AC03_CPU_36)

   - Prevent VMMs from hiding support for AArch64 at any EL virtualized
     by KVM

   - Save/restore the host value for HCRX_EL2 instead of restoring an
     incorrect fixed value

   - Make host_stage2_set_owner_locked() check that the entire requested
     range is memory rather than just the first page

  RISC-V:

   - Add missing reset of smstateen CSRs

  x86:

   - Forcibly leave SMM on SHUTDOWN interception on AMD CPUs to avoid
     causing problems due to KVM stuffing INIT on SHUTDOWN (KVM needs to
     sanitize the VMCB as its state is undefined after SHUTDOWN;
     emulating INIT is the least awful choice)

   - Track the valid sync/dirty fields in kvm_run as a u64 to ensure KVM
     doesn't goof a sanity check in the future

   - Free obsolete roots when (re)loading the MMU to fix a bug where
     pre-faulting memory can get stuck due to always encountering a
     stale root

   - When dumping GHCB state, use KVM's snapshot instead of the raw GHCB
     page to print state, so that KVM doesn't print stale/wrong
     information

   - When changing memory attributes (e.g. shared <=> private), add
     potential hugepage ranges to the mmu_invalidate_range_{start,end}
     set so that KVM doesn't create a shared/private hugepage when the
     corresponding attributes will become mixed (the attributes are
     committed *after* KVM finishes the invalidation)

   - Rework the SRSO mitigation to enable BP_SPEC_REDUCE only when KVM
     has at least one active VM; keeping BP_SPEC_REDUCE enabled whenever
     KVM was merely loaded led to very measurable performance
     regressions for non-KVM workloads"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SVM: Set/clear SRSO's BP_SPEC_REDUCE on 0 <=> 1 VM count transitions
  KVM: arm64: Fix memory check in host_stage2_set_owner_locked()
  KVM: arm64: Kill HCRX_HOST_FLAGS
  KVM: arm64: Properly save/restore HCRX_EL2
  KVM: arm64: selftest: Don't try to disable AArch64 support
  KVM: arm64: Prevent userspace from disabling AArch64 support at any virtualisable EL
  KVM: arm64: Force HCR_EL2.xMO to 1 at all times in VHE mode
  KVM: arm64: Fix uninitialized memcache pointer in user_mem_abort()
  KVM: x86/mmu: Prevent installing hugepages when mem attributes are changing
  KVM: SVM: Update dump_ghcb() to use the GHCB snapshot fields
  KVM: RISC-V: reset smstateen CSRs
  KVM: x86/mmu: Check and free obsolete roots in kvm_mmu_reload()
  KVM: x86: Check that the high 32bits are clear in kvm_arch_vcpu_ioctl_run()
  KVM: SVM: Forcibly leave SMM mode on SHUTDOWN interception
2 parents ecb9194 + add2032 commit cd802e7
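The last x86 item in the log above ties BP_SPEC_REDUCE to VM-count transitions rather than to module load. The following is a minimal, self-contained sketch of that 0 <=> 1 transition pattern only; the counter and the enable/disable helpers are illustrative stand-ins, not the actual KVM/SVM code (which sets and clears a mitigation control on every CPU).

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the per-CPU mitigation toggles in the real patch. */
static void enable_bp_spec_reduce(void)  { puts("BP_SPEC_REDUCE enabled");  }
static void disable_bp_spec_reduce(void) { puts("BP_SPEC_REDUCE disabled"); }

static atomic_int nr_vms;

static void vm_created(void)
{
	/* Only the 0 -> 1 transition pays for enabling the mitigation. */
	if (atomic_fetch_add(&nr_vms, 1) == 0)
		enable_bp_spec_reduce();
}

static void vm_destroyed(void)
{
	/* Only the 1 -> 0 transition turns it back off. */
	if (atomic_fetch_sub(&nr_vms, 1) == 1)
		disable_bp_spec_reduce();
}

int main(void)
{
	vm_created();	/* enabled  (0 -> 1) */
	vm_created();	/* no-op    (1 -> 2) */
	vm_destroyed();	/* no-op    (2 -> 1) */
	vm_destroyed();	/* disabled (1 -> 0) */
	return 0;
}

Only the first creation and the last destruction touch the mitigation, so non-KVM workloads never pay for it while no VM exists.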

File tree

16 files changed (+200, -72 lines)

arch/arm64/include/asm/el2_setup.h

Lines changed: 1 addition & 1 deletion

@@ -52,7 +52,7 @@
 	mrs	x0, id_aa64mmfr1_el1
 	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
 	cbz	x0, .Lskip_hcrx_\@
-	mov_q	x0, HCRX_HOST_FLAGS
+	mov_q	x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
 
 	/* Enable GCS if supported */
 	mrs_s	x1, SYS_ID_AA64PFR1_EL1

arch/arm64/include/asm/kvm_arm.h

Lines changed: 1 addition & 2 deletions

@@ -100,9 +100,8 @@
 			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
-#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)
 
-#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
 #define MPAMHCR_HOST_FLAGS 0
 
 /* TCR_EL2 Registers bits */

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 6 additions & 7 deletions

@@ -235,6 +235,8 @@ static inline void __deactivate_traps_mpam(void)
 
 static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 {
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
 	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
 
@@ -245,11 +247,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 	 * EL1 instead of being trapped to EL2.
 	 */
 	if (system_supports_pmuv3()) {
-		struct kvm_cpu_context *hctxt;
-
 		write_sysreg(0, pmselr_el0);
 
-		hctxt = host_data_ptr(host_ctxt);
 		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
 		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
 		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
@@ -269,6 +268,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 		hcrx &= ~clr;
 	}
 
+	ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
 	write_sysreg_s(hcrx, SYS_HCRX_EL2);
 }
 
@@ -278,19 +278,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 
 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 {
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
 	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
 
 	write_sysreg(0, hstr_el2);
 	if (system_supports_pmuv3()) {
-		struct kvm_cpu_context *hctxt;
-
-		hctxt = host_data_ptr(host_ctxt);
 		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
 		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
 	}
 
 	if (cpus_have_final_cap(ARM64_HAS_HCX))
-		write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
+		write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);
 
 	__deactivate_traps_hfgxtr(vcpu);
 	__deactivate_traps_mpam();

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion

@@ -503,7 +503,7 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
 	int ret;
 
-	if (!addr_is_memory(addr))
+	if (!range_is_memory(addr, addr + size))
 		return -EPERM;
 
 	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
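The one-line fix above matters because a range can start in memory and run past it. Below is a toy, self-contained illustration of the difference between checking only the first page and checking the whole range; the memory map, page-by-page walk, and helper names are made up for the example (the real range_is_memory() consults the hypervisor's view of the host memory ranges).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull

/* Toy memory map: only [0x80000000, 0x80002000) counts as "memory". */
static bool addr_is_memory(uint64_t addr)
{
	return addr >= 0x80000000ull && addr < 0x80002000ull;
}

/* Check every page in [addr, end) instead of trusting the first one. */
static bool range_is_memory(uint64_t addr, uint64_t end)
{
	for (; addr < end; addr += PAGE_SIZE)
		if (!addr_is_memory(addr))
			return false;
	return true;
}

int main(void)
{
	uint64_t addr = 0x80001000ull, size = 3 * PAGE_SIZE;

	/* The first page is memory, but the range runs off the end of it. */
	printf("first page only: %d\n", addr_is_memory(addr));               /* 1 */
	printf("whole range:     %d\n", range_is_memory(addr, addr + size)); /* 0 */
	return 0;
}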

arch/arm64/kvm/hyp/vgic-v3-sr.c

Lines changed: 21 additions & 15 deletions

@@ -429,35 +429,41 @@ u64 __vgic_v3_get_gic_config(void)
 	/*
 	 * To check whether we have a MMIO-based (GICv2 compatible)
 	 * CPU interface, we need to disable the system register
-	 * view. To do that safely, we have to prevent any interrupt
-	 * from firing (which would be deadly).
+	 * view.
 	 *
-	 * Note that this only makes sense on VHE, as interrupts are
-	 * already masked for nVHE as part of the exception entry to
-	 * EL2.
-	 */
-	if (has_vhe())
-		flags = local_daif_save();
-
-	/*
 	 * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
 	 * that to be able to set ICC_SRE_EL1.SRE to 0, all the
 	 * interrupt overrides must be set. You've got to love this.
+	 *
+	 * As we always run VHE with HCR_xMO set, no extra xMO
+	 * manipulation is required in that case.
+	 *
+	 * To safely disable SRE, we have to prevent any interrupt
+	 * from firing (which would be deadly). This only makes sense
+	 * on VHE, as interrupts are already masked for nVHE as part
+	 * of the exception entry to EL2.
 	 */
-	sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
-	isb();
+	if (has_vhe()) {
+		flags = local_daif_save();
+	} else {
+		sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+		isb();
+	}
+
 	write_gicreg(0, ICC_SRE_EL1);
 	isb();
 
 	val = read_gicreg(ICC_SRE_EL1);
 
 	write_gicreg(sre, ICC_SRE_EL1);
 	isb();
-	sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
-	isb();
 
-	if (has_vhe())
+	if (has_vhe()) {
 		local_daif_restore(flags);
+	} else {
+		sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+		isb();
+	}
 
 	val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
 	val |= read_gicreg(ICH_VTR_EL2);

arch/arm64/kvm/mmu.c

Lines changed: 8 additions & 5 deletions

@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}
 
+	if (!is_protected_kvm_enabled())
+		memcache = &vcpu->arch.mmu_page_cache;
+	else
+		memcache = &vcpu->arch.pkvm_memcache;
+
 	/*
 	 * Permission faults just need to update the existing leaf entry,
 	 * and so normally don't require allocations from the memcache. The
@@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (!fault_is_perm || (logging_active && write_fault)) {
 		int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
 
-		if (!is_protected_kvm_enabled()) {
-			memcache = &vcpu->arch.mmu_page_cache;
+		if (!is_protected_kvm_enabled())
 			ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
-		} else {
-			memcache = &vcpu->arch.pkvm_memcache;
+		else
 			ret = topup_hyp_memcache(memcache, min_pages);
-		}
+
 		if (ret)
 			return ret;
 	}
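The underlying bug was a pointer assigned only inside the top-up branch but used on paths that skip it (e.g. permission faults with dirty logging). A stripped-down, self-contained illustration of the fixed shape follows; the types and helpers are toy stand-ins, not KVM code.

#include <stdio.h>

struct cache { int pages; };

static struct cache global_cache = { .pages = 4 };

/*
 * Fixed shape: pick the cache pointer up front so every later path sees an
 * initialized pointer, and keep only the top-up conditional.  The buggy shape
 * assigned the pointer inside the conditional, leaving it uninitialized on
 * paths that skip the top-up.
 */
static int handle_fault(int need_topup)
{
	struct cache *memcache = &global_cache;	/* always initialized */

	if (need_topup)
		memcache->pages += 1;		/* conditional work only */

	return memcache->pages;			/* safe on every path */
}

int main(void)
{
	printf("no top-up: %d\n", handle_fault(0));	/* 4 */
	printf("top-up:    %d\n", handle_fault(1));	/* 5 */
	return 0;
}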

arch/arm64/kvm/sys_regs.c

Lines changed: 6 additions & 0 deletions

@@ -1945,6 +1945,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
 		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
 
+	/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
+	if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
+	    !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
+	    (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
+		return -EINVAL;
+
 	return set_id_reg(vcpu, rd, user_val);
 }
 

arch/riscv/kvm/vcpu.c

Lines changed: 2 additions & 0 deletions

@@ -77,6 +77,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 	memcpy(cntx, reset_cntx, sizeof(*cntx));
 	spin_unlock(&vcpu->arch.reset_cntx_lock);
 
+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
 	kvm_riscv_vcpu_fp_reset(vcpu);
 
 	kvm_riscv_vcpu_vector_reset(vcpu);

arch/x86/kvm/mmu.h

Lines changed: 3 additions & 0 deletions

@@ -104,6 +104,9 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
+	if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
+		kvm_mmu_free_obsolete_roots(vcpu);
+
 	/*
 	 * Checking root.hpa is sufficient even when KVM has mirror root.
 	 * We can have either:
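This change makes kvm_mmu_reload() consume a pending "free obsolete roots" request before deciding whether a root still needs to be loaded, so a stale root can no longer satisfy the fast path forever. A self-contained toy of that check-and-consume pattern follows, with a plain bitmask standing in for the real per-vCPU, atomic KVM_REQ_* machinery.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the per-vCPU request bitmap. */
static unsigned long requests;
#define REQ_FREE_OBSOLETE_ROOTS (1ul << 0)

static bool check_and_clear_request(unsigned long req)
{
	bool pending = requests & req;

	requests &= ~req;
	return pending;
}

static void free_obsolete_roots(void) { puts("dropped obsolete roots"); }

static void mmu_reload(void)
{
	/*
	 * Consume the request before the "is a valid root already loaded?"
	 * fast path, so a stale root can't keep short-circuiting the reload.
	 */
	if (check_and_clear_request(REQ_FREE_OBSOLETE_ROOTS))
		free_obsolete_roots();

	puts("allocate/load a root if none is present");
}

int main(void)
{
	requests |= REQ_FREE_OBSOLETE_ROOTS;	/* e.g. raised when roots are zapped */
	mmu_reload();
	return 0;
}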

arch/x86/kvm/mmu/mmu.c

Lines changed: 54 additions & 16 deletions

@@ -5974,6 +5974,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);
 
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 				    int *bytes)
@@ -7669,9 +7670,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 }
 
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+				int level)
+{
+	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+				 int level)
+{
+	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+			       int level)
+{
+	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
+}
+
 bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 					struct kvm_gfn_range *range)
 {
+	struct kvm_memory_slot *slot = range->slot;
+	int level;
+
 	/*
 	 * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
 	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
@@ -7686,6 +7708,38 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
 		return false;
 
+	if (WARN_ON_ONCE(range->end <= range->start))
+		return false;
+
+	/*
+	 * If the head and tail pages of the range currently allow a hugepage,
+	 * i.e. reside fully in the slot and don't have mixed attributes, then
+	 * add each corresponding hugepage range to the ongoing invalidation,
+	 * e.g. to prevent KVM from creating a hugepage in response to a fault
+	 * for a gfn whose attributes aren't changing.  Note, only the range
+	 * of gfns whose attributes are being modified needs to be explicitly
+	 * unmapped, as that will unmap any existing hugepages.
+	 */
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		gfn_t start = gfn_round_for_level(range->start, level);
+		gfn_t end = gfn_round_for_level(range->end - 1, level);
+		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+
+		if ((start != range->start || start + nr_pages > range->end) &&
+		    start >= slot->base_gfn &&
+		    start + nr_pages <= slot->base_gfn + slot->npages &&
+		    !hugepage_test_mixed(slot, start, level))
+			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+		if (end == start)
+			continue;
+
+		if ((end + nr_pages) > range->end &&
+		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+		    !hugepage_test_mixed(slot, end, level))
+			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+	}
+
 	/* Unmap the old attribute page. */
 	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
 		range->attr_filter = KVM_FILTER_SHARED;
@@ -7695,23 +7749,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
 	return kvm_unmap_gfn_range(kvm, range);
 }
 
-static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-				int level)
-{
-	return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
-}
-
-static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-				 int level)
-{
-	lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
-}
-
-static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
-			       int level)
-{
-	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
-}
-
 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 			       gfn_t gfn, int level, unsigned long attrs)
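The head/tail loop added above rounds the range boundaries down to each hugepage level and pulls any straddling hugepage-sized span into the ongoing invalidation. Below is a small self-contained example of that arithmetic at the 2M level (512 base-page GFNs), using made-up GFN values; it only illustrates the rounding, not the slot-bounds or mixed-attribute checks.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* A 2MiB hugepage spans 512 base-page (4KiB) GFNs. */
#define PAGES_PER_2M ((gfn_t)512)

static gfn_t round_down_2m(gfn_t gfn)
{
	return gfn & ~(PAGES_PER_2M - 1);
}

int main(void)
{
	/* Attributes change for GFNs [0x300, 0x500). */
	gfn_t start = 0x300, end = 0x500;

	gfn_t head = round_down_2m(start);   /* 0x200: begins before the range */
	gfn_t tail = round_down_2m(end - 1); /* 0x400: ends after the range    */

	/*
	 * Both hugepage-sized spans straddle the requested range, so both are
	 * added to the invalidation even though only [0x300, 0x500) is unmapped.
	 */
	printf("head hugepage: [0x%" PRIx64 ", 0x%" PRIx64 ")\n", head, head + PAGES_PER_2M);
	printf("tail hugepage: [0x%" PRIx64 ", 0x%" PRIx64 ")\n", tail, tail + PAGES_PER_2M);
	return 0;
}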

arch/x86/kvm/smm.c

Lines changed: 1 addition & 0 deletions

@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 
 	kvm_mmu_reset_context(vcpu);
 }
+EXPORT_SYMBOL_GPL(kvm_smm_changed);
 
 void process_smi(struct kvm_vcpu *vcpu)
 {

arch/x86/kvm/svm/sev.c

Lines changed: 19 additions & 13 deletions

@@ -3173,9 +3173,14 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
 	kvfree(svm->sev_es.ghcb_sa);
 }
 
+static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+{
+	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
+}
+
 static void dump_ghcb(struct vcpu_svm *svm)
 {
-	struct ghcb *ghcb = svm->sev_es.ghcb;
+	struct vmcb_control_area *control = &svm->vmcb->control;
 	unsigned int nbits;
 
 	/* Re-use the dump_invalid_vmcb module parameter */
@@ -3184,18 +3189,24 @@ static void dump_ghcb(struct vcpu_svm *svm)
 		return;
 	}
 
-	nbits = sizeof(ghcb->save.valid_bitmap) * 8;
+	nbits = sizeof(svm->sev_es.valid_bitmap) * 8;
 
-	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
+	/*
+	 * Print KVM's snapshot of the GHCB values that were (unsuccessfully)
+	 * used to handle the exit.  If the guest has since modified the GHCB
+	 * itself, dumping the raw GHCB won't help debug why KVM was unable to
+	 * handle the VMGEXIT that KVM observed.
+	 */
+	pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
-	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
+	       kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
-	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
+	       control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
-	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
+	       control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm));
 	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
-	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
-	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
+	       svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm));
+	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap);
 }
 
 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
@@ -3266,11 +3277,6 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
 
-static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
-{
-	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
-}
-
 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;