
Commit 0919e84

Marc Zyngier authored and chazy committed
KVM: arm/arm64: vgic-new: Add IRQ sync/flush framework
Implement the framework for syncing IRQs between our emulation and the
list registers, which represent the guest's view of IRQs. This is done
in kvm_vgic_flush_hwstate and kvm_vgic_sync_hwstate, which get called
on guest entry and exit. The code talking to the actual GICv2/v3
hardware is added in the following patches.

Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
Signed-off-by: Eric Auger <[email protected]>
Signed-off-by: Andre Przywara <[email protected]>
Reviewed-by: Eric Auger <[email protected]>
Reviewed-by: Christoffer Dall <[email protected]>
1 parent 8e44474 commit 0919e84
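
For orientation, here is a minimal sketch of where the two new hooks sit in a simplified VCPU run loop, as described in the commit message. The surrounding function and the world-switch call are placeholders for illustration, not the actual KVM entry path:

/* Sketch of a simplified VCPU run loop; not the real kvm_arch_vcpu_ioctl_run(). */
static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
        int ret;

        /* Before entering the guest: flush emulated IRQ state into the LRs. */
        kvm_vgic_flush_hwstate(vcpu);

        ret = example_enter_guest(vcpu);        /* hypothetical world switch */

        /* After the guest exits: fold LR state back and prune the ap_list. */
        kvm_vgic_sync_hwstate(vcpu);

        return ret;
}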

File tree: 3 files changed (+198, -0)


include/kvm/vgic/vgic.h

Lines changed: 4 additions & 0 deletions
@@ -190,6 +190,10 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 #define vgic_valid_spi(k, i)	(((i) >= VGIC_NR_PRIVATE_IRQS) && \
 			((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
 
+bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+
 /**
  * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
  *

virt/kvm/arm/vgic/vgic.c

Lines changed: 192 additions & 0 deletions
@@ -307,3 +307,195 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 {
 	return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
 }
+
+/**
+ * vgic_prune_ap_list - Remove non-relevant interrupts from the list
+ *
+ * @vcpu: The VCPU pointer
+ *
+ * Go over the list of "interesting" interrupts, and prune those that we
+ * won't have to consider in the near future.
+ */
+static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq, *tmp;
+
+retry:
+	spin_lock(&vgic_cpu->ap_list_lock);
+
+	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
+		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
+
+		spin_lock(&irq->irq_lock);
+
+		BUG_ON(vcpu != irq->vcpu);
+
+		target_vcpu = vgic_target_oracle(irq);
+
+		if (!target_vcpu) {
+			/*
+			 * We don't need to process this interrupt any
+			 * further, move it off the list.
+			 */
+			list_del(&irq->ap_list);
+			irq->vcpu = NULL;
+			spin_unlock(&irq->irq_lock);
+			continue;
+		}
+
+		if (target_vcpu == vcpu) {
+			/* We're on the right CPU */
+			spin_unlock(&irq->irq_lock);
+			continue;
+		}
+
+		/* This interrupt looks like it has to be migrated. */
+
+		spin_unlock(&irq->irq_lock);
+		spin_unlock(&vgic_cpu->ap_list_lock);
+
+		/*
+		 * Ensure locking order by always locking the smallest
+		 * ID first.
+		 */
+		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
+			vcpuA = vcpu;
+			vcpuB = target_vcpu;
+		} else {
+			vcpuA = target_vcpu;
+			vcpuB = vcpu;
+		}
+
+		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+				 SINGLE_DEPTH_NESTING);
+		spin_lock(&irq->irq_lock);
+
+		/*
+		 * If the affinity has been preserved, move the
+		 * interrupt around. Otherwise, it means things have
+		 * changed while the interrupt was unlocked, and we
+		 * need to replay this.
+		 *
+		 * In all cases, we cannot trust the list not to have
+		 * changed, so we restart from the beginning.
+		 */
+		if (target_vcpu == vgic_target_oracle(irq)) {
+			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
+
+			list_del(&irq->ap_list);
+			irq->vcpu = target_vcpu;
+			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
+		}
+
+		spin_unlock(&irq->irq_lock);
+		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		goto retry;
+	}
+
+	spin_unlock(&vgic_cpu->ap_list_lock);
+}
+
+static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+}
+
+/* Requires the irq_lock to be held. */
+static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
+				    struct vgic_irq *irq, int lr)
+{
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+}
+
+static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+}
+
+static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
+{
+}
+
+/* Requires the ap_list_lock to be held. */
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq;
+	int count = 0;
+
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+
+	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		spin_lock(&irq->irq_lock);
+		/* GICv2 SGIs can count for more than one... */
+		if (vgic_irq_is_sgi(irq->intid) && irq->source)
+			count += hweight8(irq->source);
+		else
+			count++;
+		spin_unlock(&irq->irq_lock);
+	}
+	return count;
+}
+
+/* Requires the VCPU's ap_list_lock to be held. */
+static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq;
+	int count = 0;
+
+	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+
+	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
+		vgic_set_underflow(vcpu);
+		vgic_sort_ap_list(vcpu);
+	}
+
+	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		spin_lock(&irq->irq_lock);
+
+		if (unlikely(vgic_target_oracle(irq) != vcpu))
+			goto next;
+
+		/*
+		 * If we get an SGI with multiple sources, try to get
+		 * them in all at once.
+		 */
+		do {
+			vgic_populate_lr(vcpu, irq, count++);
+		} while (irq->source && count < kvm_vgic_global_state.nr_lr);
+
+next:
+		spin_unlock(&irq->irq_lock);
+
+		if (count == kvm_vgic_global_state.nr_lr)
+			break;
+	}
+
+	vcpu->arch.vgic_cpu.used_lrs = count;
+
+	/* Nuke remaining LRs */
+	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
+		vgic_clear_lr(vcpu, count);
+}
+
+/* Sync back the hardware VGIC state into our emulation after a guest's run. */
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+	vgic_process_maintenance_interrupt(vcpu);
+	vgic_fold_lr_state(vcpu);
+	vgic_prune_ap_list(vcpu);
+}
+
+/* Flush our emulation state into the GIC hardware before entering the guest. */
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	vgic_flush_lr_state(vcpu);
+	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+}
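
The migration path in vgic_prune_ap_list() relies on a fixed lock ordering to avoid ABBA deadlocks between two VCPUs' ap_list_locks. In isolation, the pattern looks roughly like the sketch below; the helper name is illustrative only and is not part of the patch:

/* Illustrative only: take two VCPUs' ap_list_locks in a stable order
 * (lowest vcpu_id first) so concurrent migrations cannot deadlock. */
static void example_lock_both_ap_lists(struct kvm_vcpu *a, struct kvm_vcpu *b)
{
        struct kvm_vcpu *first  = (a->vcpu_id < b->vcpu_id) ? a : b;
        struct kvm_vcpu *second = (a->vcpu_id < b->vcpu_id) ? b : a;

        spin_lock(&first->arch.vgic_cpu.ap_list_lock);
        spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
                         SINGLE_DEPTH_NESTING);
}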

virt/kvm/arm/vgic/vgic.h

Lines changed: 2 additions & 0 deletions
@@ -16,6 +16,8 @@
 #ifndef __KVM_ARM_VGIC_NEW_H__
 #define __KVM_ARM_VGIC_NEW_H__
 
+#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
+
 struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
 			      u32 intid);
 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
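
The new vgic_irq_is_sgi() macro is what lets compute_ap_list_depth() account a multi-source GICv2 SGI as several list-register slots. A minimal illustration of that accounting, with made-up values:

/* Illustrative only: an SGI pending from CPUs 0, 2 and 5 needs three
 * list registers, while any other interrupt needs one. */
u32 intid = 3;                          /* SGIs are INTIDs 0..15 */
u8 source = BIT(0) | BIT(2) | BIT(5);   /* hypothetical irq->source bitmap */
int slots = (vgic_irq_is_sgi(intid) && source) ? hweight8(source) : 1;  /* == 3 */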
