Commit fcd1ec9
KVM: x86/mmu: fix KVM_X86_QUIRK_SLOT_ZAP_ALL for shadow MMU
As was tried in commit 4e10313 ("KVM: x86/mmu: Zap only the relevant pages when removing a memslot"), all shadow pages, i.e. non-leaf SPTEs, need to be zapped. All of the accounting for a shadow page is tied to the memslot, i.e. the shadow page holds a reference to the memslot, for all intents and purposes. Deleting the memslot without removing all relevant shadow pages, as is done when KVM_X86_QUIRK_SLOT_ZAP_ALL is disabled, results in NULL pointer derefs when tearing down the VM.

Reintroduce from that commit the code that walks the whole memslot when there are active shadow MMU pages.

Signed-off-by: Paolo Bonzini <[email protected]>
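For context, the quirk in question is toggled from userspace, so the affected configuration is reached when a VMM disables KVM_X86_QUIRK_SLOT_ZAP_ALL and then deletes a memslot. Below is a minimal sketch of that sequence, not part of this commit: it assumes an existing VM fd and UAPI headers new enough to define KVM_X86_QUIRK_SLOT_ZAP_ALL, and uses KVM_ENABLE_CAP with KVM_CAP_DISABLE_QUIRKS2, the documented mechanism for disabling x86 quirks.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch only: disable KVM_X86_QUIRK_SLOT_ZAP_ALL for a VM, then delete
 * a memslot.  With the quirk disabled, kvm_arch_flush_shadow_memslot()
 * takes the targeted-zap path touched by this commit.  vm_fd and the
 * slot layout are assumed to have been set up elsewhere.
 */
static int disable_slot_zap_all_quirk(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_DISABLE_QUIRKS2,
		.args = { KVM_X86_QUIRK_SLOT_ZAP_ALL },
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

static int delete_memslot(int vm_fd, __u32 slot_id)
{
	struct kvm_userspace_memory_region region = {
		.slot = slot_id,
		.memory_size = 0,	/* memory_size == 0 deletes the slot */
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}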
1 parent: 76f972c

1 file changed: +46, -14 lines
arch/x86/kvm/mmu/mmu.c

Lines changed: 46 additions & 14 deletions

@@ -1884,10 +1884,14 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
 	if (is_obsolete_sp((_kvm), (_sp))) {			\
 	} else
 
-#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)	\
+#define for_each_gfn_valid_sp(_kvm, _sp, _gfn)			\
 	for_each_valid_sp(_kvm, _sp,				\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
-		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
+		if ((_sp)->gfn != (_gfn)) {} else
+
+#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn)	\
+	for_each_gfn_valid_sp(_kvm, _sp, _gfn)			\
+		if (!sp_has_gptes(_sp)) {} else
 
 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -7049,26 +7053,54 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 	kvm_mmu_zap_all(kvm);
 }
 
-/*
- * Zapping leaf SPTEs with memslot range when a memslot is moved/deleted.
- *
- * Zapping non-leaf SPTEs, a.k.a. not-last SPTEs, isn't required, worst
- * case scenario we'll have unused shadow pages lying around until they
- * are recycled due to age or when the VM is destroyed.
- */
-static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *slot)
+static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm,
+						struct kvm_memory_slot *slot,
+						bool flush)
+{
+	LIST_HEAD(invalid_list);
+	unsigned long i;
+
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		goto out_flush;
+
+	/*
+	 * Since accounting information is stored in struct kvm_arch_memory_slot,
+	 * shadow pages deletion (e.g. unaccount_shadowed()) requires that all
+	 * gfns with a shadow page have a corresponding memslot.  Do so before
+	 * the memslot goes away.
+	 */
+	for (i = 0; i < slot->npages; i++) {
+		struct kvm_mmu_page *sp;
+		gfn_t gfn = slot->base_gfn + i;
+
+		for_each_gfn_valid_sp(kvm, sp, gfn)
+			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+
+		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+			flush = false;
+			cond_resched_rwlock_write(&kvm->mmu_lock);
+		}
+	}
+
+out_flush:
+	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+}
+
+static void kvm_mmu_zap_memslot(struct kvm *kvm,
+				struct kvm_memory_slot *slot)
 {
 	struct kvm_gfn_range range = {
 		.slot = slot,
 		.start = slot->base_gfn,
 		.end = slot->base_gfn + slot->npages,
 		.may_block = true,
 	};
+	bool flush;
 
 	write_lock(&kvm->mmu_lock);
-	if (kvm_unmap_gfn_range(kvm, &range))
-		kvm_flush_remote_tlbs_memslot(kvm, slot);
-
+	flush = kvm_unmap_gfn_range(kvm, &range);
+	kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush);
 	write_unlock(&kvm->mmu_lock);
 }
 
@@ -7084,7 +7116,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	if (kvm_memslot_flush_zap_all(kvm))
 		kvm_mmu_zap_all_fast(kvm);
 	else
-		kvm_mmu_zap_memslot_leafs(kvm, slot);
+		kvm_mmu_zap_memslot(kvm, slot);
 }
 
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
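A side note on the first hunk: the trailing "if (...) {} else" in these macros is the standard kernel idiom for a filtered for-each macro. The empty if-body swallows iterations that fail the filter, and the else binds the caller's single trailing statement, so the macro composes like an ordinary for loop. A standalone sketch of the idiom, with invented names, independent of any KVM code:

#include <stdio.h>

/*
 * Illustration of the "{} else" filter idiom used by
 * for_each_gfn_valid_sp() and for_each_gfn_valid_sp_with_gptes():
 * non-matching iterations hit the empty if-body, and the caller's
 * statement becomes the else branch.
 */
#define for_each_even(i, n)				\
	for ((i) = 0; (i) < (n); (i)++)			\
		if ((i) % 2 != 0) {} else

int main(void)
{
	int i;

	for_each_even(i, 10)
		printf("%d ", i);	/* prints: 0 2 4 6 8 */
	printf("\n");
	return 0;
}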
