Skip to content

Commit c258b62

Browse files
Xiao Guangrong authored and Paolo Bonzini committed
KVM: MMU: introduce the framework to check zero bits on sptes
We have abstracted the data structures and functions used to check reserved bits on guest page tables; now we extend the logic to check zero bits on shadow page tables. The zero bits on sptes include not only the hardware reserved bits but also the bits that SPTEs will never use. For example, shadow pages will never use GB pages unless the guest uses them too. Signed-off-by: Xiao Guangrong <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 81b8eeb commit c258b62

File tree

4 files changed

+62
-0
lines changed

4 files changed

+62
-0
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -294,6 +294,14 @@ struct kvm_mmu {
294294

295295
u64 *pae_root;
296296
u64 *lm_root;
297+
298+
/*
299+
* check zero bits on shadow page table entries, these
300+
* bits include not only hardware reserved bits but also
301+
* the bits spte never used.
302+
*/
303+
struct rsvd_bits_validate shadow_zero_check;
304+
297305
struct rsvd_bits_validate guest_rsvd_check;
298306

299307
/*

arch/x86/kvm/mmu.c

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3699,6 +3699,53 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
36993699
cpuid_maxphyaddr(vcpu), execonly);
37003700
}
37013701

3702+
/*
3703+
* the page table on host is the shadow page table for the page
3704+
* table in guest or amd nested guest, its mmu features completely
3705+
* follow the features in guest.
3706+
*/
3707+
void
3708+
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
3709+
{
3710+
__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
3711+
boot_cpu_data.x86_phys_bits,
3712+
context->shadow_root_level, context->nx,
3713+
guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
3714+
}
3715+
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
3716+
3717+
/*
3718+
* the direct page table on host, use as much mmu features as
3719+
* possible, however, kvm currently does not do execution-protection.
3720+
*/
3721+
static void
3722+
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
3723+
struct kvm_mmu *context)
3724+
{
3725+
if (guest_cpuid_is_amd(vcpu))
3726+
__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
3727+
boot_cpu_data.x86_phys_bits,
3728+
context->shadow_root_level, false,
3729+
cpu_has_gbpages, true);
3730+
else
3731+
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
3732+
boot_cpu_data.x86_phys_bits,
3733+
false);
3734+
3735+
}
3736+
3737+
/*
3738+
* as the comments in reset_shadow_zero_bits_mask() except it
3739+
* is the shadow page table for intel nested guest.
3740+
*/
3741+
static void
3742+
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
3743+
struct kvm_mmu *context, bool execonly)
3744+
{
3745+
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
3746+
boot_cpu_data.x86_phys_bits, execonly);
3747+
}
3748+
37023749
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
37033750
struct kvm_mmu *mmu, bool ept)
37043751
{
@@ -3877,6 +3924,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
38773924

38783925
update_permission_bitmask(vcpu, context, false);
38793926
update_last_pte_bitmap(vcpu, context);
3927+
reset_tdp_shadow_zero_bits_mask(vcpu, context);
38803928
}
38813929

38823930
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
@@ -3904,6 +3952,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
39043952
context->base_role.smap_andnot_wp
39053953
= smap && !is_write_protection(vcpu);
39063954
context->base_role.smm = is_smm(vcpu);
3955+
reset_shadow_zero_bits_mask(vcpu, context);
39073956
}
39083957
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
39093958

@@ -3927,6 +3976,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
39273976

39283977
update_permission_bitmask(vcpu, context, true);
39293978
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
3979+
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
39303980
}
39313981
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
39323982

arch/x86/kvm/mmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,9 @@ static inline u64 rsvd_bits(int s, int e)
5353
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
5454
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
5555

56+
void
57+
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
58+
5659
/*
5760
* Return values of handle_mmio_page_fault_common:
5861
* RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction

arch/x86/kvm/svm.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2107,6 +2107,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
21072107
vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
21082108
vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
21092109
vcpu->arch.mmu.shadow_root_level = get_npt_level();
2110+
reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
21102111
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
21112112
}
21122113

0 commit comments

Comments
 (0)