Skip to content

Commit d923ff2

Browse files
sean-jc authored and bonzini committed
KVM: MIPS/MMU: Convert to the gfn-based MMU notifier callbacks
Move MIPS to the gfn-based MMU notifier APIs, which do the hva->gfn lookup in common code, and whose code is nearly identical to MIPS' lookup. No meaningful functional change intended, though the exact order of operations is slightly different since the memslot lookups occur before calling into arch code. Signed-off-by: Sean Christopherson <seanjc@google.com> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent cd4c718 commit d923ff2

File tree

2 files changed

+14
-79
lines changed

2 files changed

+14
-79
lines changed

arch/mips/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -960,6 +960,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
960960
bool write);
961961

962962
#define KVM_ARCH_WANT_MMU_NOTIFIER
963+
#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
963964

964965
/* Emulation */
965966
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

arch/mips/kvm/mmu.c

Lines changed: 13 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -439,82 +439,34 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
439439
end_gfn << PAGE_SHIFT);
440440
}
441441

442-
static int handle_hva_to_gpa(struct kvm *kvm,
443-
unsigned long start,
444-
unsigned long end,
445-
int (*handler)(struct kvm *kvm, gfn_t gfn,
446-
gpa_t gfn_end,
447-
struct kvm_memory_slot *memslot,
448-
void *data),
449-
void *data)
442+
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
450443
{
451-
struct kvm_memslots *slots;
452-
struct kvm_memory_slot *memslot;
453-
int ret = 0;
454-
455-
slots = kvm_memslots(kvm);
456-
457-
/* we only care about the pages that the guest sees */
458-
kvm_for_each_memslot(memslot, slots) {
459-
unsigned long hva_start, hva_end;
460-
gfn_t gfn, gfn_end;
461-
462-
hva_start = max(start, memslot->userspace_addr);
463-
hva_end = min(end, memslot->userspace_addr +
464-
(memslot->npages << PAGE_SHIFT));
465-
if (hva_start >= hva_end)
466-
continue;
467-
468-
/*
469-
* {gfn(page) | page intersects with [hva_start, hva_end)} =
470-
* {gfn_start, gfn_start+1, ..., gfn_end-1}.
471-
*/
472-
gfn = hva_to_gfn_memslot(hva_start, memslot);
473-
gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
474-
475-
ret |= handler(kvm, gfn, gfn_end, memslot, data);
476-
}
477-
478-
return ret;
479-
}
480-
481-
482-
static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
483-
struct kvm_memory_slot *memslot, void *data)
484-
{
485-
kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
444+
kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
486445
return 1;
487446
}
488447

489-
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
490-
unsigned flags)
491-
{
492-
return handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
493-
}
494-
495-
static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
496-
struct kvm_memory_slot *memslot, void *data)
448+
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
497449
{
498-
gpa_t gpa = gfn << PAGE_SHIFT;
499-
pte_t hva_pte = *(pte_t *)data;
450+
gpa_t gpa = range->start << PAGE_SHIFT;
451+
pte_t hva_pte = range->pte;
500452
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
501453
pte_t old_pte;
502454

503455
if (!gpa_pte)
504-
return 0;
456+
return false;
505457

506458
/* Mapping may need adjusting depending on memslot flags */
507459
old_pte = *gpa_pte;
508-
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
460+
if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
509461
hva_pte = pte_mkclean(hva_pte);
510-
else if (memslot->flags & KVM_MEM_READONLY)
462+
else if (range->slot->flags & KVM_MEM_READONLY)
511463
hva_pte = pte_wrprotect(hva_pte);
512464

513465
set_pte(gpa_pte, hva_pte);
514466

515467
/* Replacing an absent or old page doesn't need flushes */
516468
if (!pte_present(old_pte) || !pte_young(old_pte))
517-
return 0;
469+
return false;
518470

519471
/* Pages swapped, aged, moved, or cleaned require flushes */
520472
return !pte_present(hva_pte) ||
@@ -523,39 +475,21 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
523475
(pte_dirty(old_pte) && !pte_dirty(hva_pte));
524476
}
525477

526-
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
478+
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
527479
{
528-
unsigned long end = hva + PAGE_SIZE;
529-
return handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
480+
return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
530481
}
531482

532-
static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
533-
struct kvm_memory_slot *memslot, void *data)
483+
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
534484
{
535-
return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
536-
}
537-
538-
static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
539-
struct kvm_memory_slot *memslot, void *data)
540-
{
541-
gpa_t gpa = gfn << PAGE_SHIFT;
485+
gpa_t gpa = range->start << PAGE_SHIFT;
542486
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
543487

544488
if (!gpa_pte)
545489
return 0;
546490
return pte_young(*gpa_pte);
547491
}
548492

549-
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
550-
{
551-
return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
552-
}
553-
554-
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
555-
{
556-
return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
557-
}
558-
559493
/**
560494
* _kvm_mips_map_page_fast() - Fast path GPA fault handler.
561495
* @vcpu: VCPU pointer.

0 commit comments

Comments
 (0)