Skip to content

Commit e234832

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini: "ARM fixes. There are a couple pending x86 patches but they'll have to wait for next week" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: KVM: arm/arm64: vgic: Kick VCPUs when queueing already pending IRQs KVM: arm/arm64: vgic: Prevent access to invalid SPIs arm/arm64: KVM: Perform local TLB invalidation when multiplexing vcpus on a single CPU
2 parents e861d89 + 05d36a7 commit e234832

File tree

12 files changed

+112
-23
lines changed

12 files changed

+112
-23
lines changed

arch/arm/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
6666
extern void __kvm_flush_vm_context(void);
6767
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
6868
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
69+
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
6970

7071
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
7172

arch/arm/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,9 @@ struct kvm_arch {
5757
/* VTTBR value associated with below pgd and vmid */
5858
u64 vttbr;
5959

60+
/* The last vcpu id that ran on each physical CPU */
61+
int __percpu *last_vcpu_ran;
62+
6063
/* Timer */
6164
struct arch_timer_kvm timer;
6265

arch/arm/include/asm/kvm_hyp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@
7171
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
7272
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
7373
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
74+
#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
7475
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
7576
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
7677
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)

arch/arm/kvm/arm.c

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
114114
*/
115115
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
116116
{
117-
int ret = 0;
117+
int ret, cpu;
118118

119119
if (type)
120120
return -EINVAL;
121121

122+
kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
123+
if (!kvm->arch.last_vcpu_ran)
124+
return -ENOMEM;
125+
126+
for_each_possible_cpu(cpu)
127+
*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
128+
122129
ret = kvm_alloc_stage2_pgd(kvm);
123130
if (ret)
124131
goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
141148
out_free_stage2_pgd:
142149
kvm_free_stage2_pgd(kvm);
143150
out_fail_alloc:
151+
free_percpu(kvm->arch.last_vcpu_ran);
152+
kvm->arch.last_vcpu_ran = NULL;
144153
return ret;
145154
}
146155

@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
168177
{
169178
int i;
170179

180+
free_percpu(kvm->arch.last_vcpu_ran);
181+
kvm->arch.last_vcpu_ran = NULL;
182+
171183
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
172184
if (kvm->vcpus[i]) {
173185
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
312324

313325
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
314326
{
327+
int *last_ran;
328+
329+
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
330+
331+
/*
332+
* We might get preempted before the vCPU actually runs, but
333+
* over-invalidation doesn't affect correctness.
334+
*/
335+
if (*last_ran != vcpu->vcpu_id) {
336+
kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
337+
*last_ran = vcpu->vcpu_id;
338+
}
339+
315340
vcpu->cpu = cpu;
316341
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
317342

arch/arm/kvm/hyp/tlb.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
5555
__kvm_tlb_flush_vmid(kvm);
5656
}
5757

58+
/*
 * Invalidate all stage-2 TLB entries for @vcpu's VM on the *local* CPU
 * only.  Runs in HYP mode (__hyp_text); used when multiplexing VCPUs of
 * the same VM on one physical CPU, where stale per-VMID entries from a
 * previously-run VCPU must not survive.  (32-bit ARM variant.)
 */
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	/*
	 * vcpu is a kernel VA; convert it — and the kvm pointer read
	 * through it — to HYP VAs before dereferencing at HYP.
	 */
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

	/* Switch to requested VMID */
	write_sysreg(kvm->arch.vttbr, VTTBR);
	isb();	/* VMID change must take effect before the TLBI */

	/*
	 * TLBIALL with dsb(nsh): the non-shareable barrier confines
	 * completion to this CPU — deliberately not a broadcast flush.
	 */
	write_sysreg(0, TLBIALL);
	dsb(nsh);
	isb();

	/* Leave VTTBR clear so no guest translation regime is active. */
	write_sysreg(0, VTTBR);
}
72+
5873
void __hyp_text __kvm_flush_vm_context(void)
5974
{
6075
write_sysreg(0, TLBIALLNSNHIS);

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
5454
extern void __kvm_flush_vm_context(void);
5555
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
5656
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
57+
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
5758

5859
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
5960

arch/arm64/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,9 @@ struct kvm_arch {
6262
/* VTTBR value associated with above pgd and vmid */
6363
u64 vttbr;
6464

65+
/* The last vcpu id that ran on each physical CPU */
66+
int __percpu *last_vcpu_ran;
67+
6568
/* The maximum number of vCPUs depends on the used GIC model */
6669
int max_vcpus;
6770

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
128128
return v;
129129
}
130130

131-
#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
131+
/*
 * Convert a kernel VA to a HYP VA, preserving the pointer's type.  The
 * outer parentheses let callers apply postfix operators to the result
 * (e.g. kern_hyp_va(vcpu)->kvm) without the cast binding incorrectly.
 */
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
132132

133133
/*
134134
* We currently only support a 40bit IPA.

arch/arm64/kvm/hyp/tlb.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
6464
write_sysreg(0, vttbr_el2);
6565
}
6666

67+
/*
 * Invalidate all stage-1+2 TLB entries for @vcpu's VM on the *local*
 * CPU only.  Runs at EL2 (__hyp_text); used when multiplexing VCPUs of
 * the same VM on one physical CPU, where stale per-VMID entries from a
 * previously-run VCPU must not survive.  (arm64 variant.)
 */
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	/*
	 * vcpu is a kernel VA; convert it — and the kvm pointer read
	 * through it — to HYP VAs before dereferencing at EL2.
	 */
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

	/* Switch to requested VMID */
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	isb();	/* VMID change must take effect before the TLBI */

	/*
	 * "tlbi vmalle1" (no "is" suffix) with dsb(nsh): invalidation is
	 * confined to this CPU — deliberately not an inner-shareable
	 * broadcast flush.
	 */
	asm volatile("tlbi vmalle1" : : );
	dsb(nsh);
	isb();

	/* Leave VTTBR_EL2 clear so no guest translation regime is active. */
	write_sysreg(0, vttbr_el2);
}
81+
6782
void __hyp_text __kvm_flush_vm_context(void)
6883
{
6984
dsb(ishst);

virt/kvm/arm/vgic/vgic-mmio.c

Lines changed: 27 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
453453
return container_of(dev, struct vgic_io_device, dev);
454454
}
455455

/*
 * Validate a guest MMIO access to a vgic register region.
 *
 * The access is accepted only if:
 *  - its width (@len) is one of 1, 4 or 8 bytes AND that width is
 *    permitted by @region->access_flags,
 *  - @addr is naturally aligned for @len, and
 *  - for per-IRQ regions (bits_per_irq != 0), the interrupt ID the
 *    offset decodes to actually exists for this VM — this is the fix
 *    that prevents access to SPIs beyond what the VM has allocated.
 *
 * Returns true if the access may be dispatched, false to reject it.
 */
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	/*
	 * IRQs this VM implements: the architected private IRQs
	 * (SGIs+PPIs) plus the configured number of SPIs.
	 */
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	/* Map the access width onto the region access-flag encoding;
	 * any other width is rejected outright. */
	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		/* Regions without per-IRQ layout need no INTID check. */
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
477493

478494
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
479495
addr - iodev->base_addr);
480-
if (!region || !check_region(region, addr, len)) {
496+
if (!region || !check_region(vcpu->kvm, region, addr, len)) {
481497
memset(val, 0, len);
482498
return 0;
483499
}
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
510526

511527
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
512528
addr - iodev->base_addr);
513-
if (!region)
514-
return 0;
515-
516-
if (!check_region(region, addr, len))
529+
if (!region || !check_region(vcpu->kvm, region, addr, len))
517530
return 0;
518531

519532
switch (iodev->iodev_type) {

virt/kvm/arm/vgic/vgic-mmio.h

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
5050
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
5151

5252
/*
53-
* (addr & mask) gives us the byte offset for the INT ID, so we want to
54-
* divide this with 'bytes per irq' to get the INT ID, which is given
55-
* by '(bits) / 8'. But we do this with fixed-point-arithmetic and
56-
* take advantage of the fact that division by a fraction equals
57-
* multiplication with the inverted fraction, and scale up both the
58-
* numerator and denominator with 8 to support at most 64 bits per IRQ:
53+
* (addr & mask) gives us the _byte_ offset for the INT ID.
54+
* We multiply this by 8 the get the _bit_ offset, then divide this by
55+
* the number of bits to learn the actual INT ID.
56+
* But instead of a division (which requires a "long long div" implementation),
57+
* we shift by the binary logarithm of <bits>.
58+
* This assumes that <bits> is a power of two.
5959
*/
6060
#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
61-
64 / (bits) / 8)
61+
8 >> ilog2(bits))
6262

6363
/*
6464
* Some VGIC registers store per-IRQ information, with a different number

virt/kvm/arm/vgic/vgic.c

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,18 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
273273
* no more work for us to do.
274274
*/
275275
spin_unlock(&irq->irq_lock);
276+
277+
/*
278+
* We have to kick the VCPU here, because we could be
279+
* queueing an edge-triggered interrupt for which we
280+
* get no EOI maintenance interrupt. In that case,
281+
* while the IRQ is already on the VCPU's AP list, the
282+
* VCPU could have EOI'ed the original interrupt and
283+
* won't see this one until it exits for some other
284+
* reason.
285+
*/
286+
if (vcpu)
287+
kvm_vcpu_kick(vcpu);
276288
return false;
277289
}
278290

0 commit comments

Comments
 (0)