
Commit af06149

Christoffer Dall (chazy) authored and committed
KVM: arm/arm64: vgic: Get rid of unnecessary process_maintenance operation
Since we always read back the LRs that we wrote to the guest, and the MISR and EISR registers simply provide a summary of the configuration of the bits in the LRs, there is really no need to read back those status registers and process them. We might as well just signal the notifyfd when folding the LR state and save some cycles in the process.

We now clear the underflow bit in the fold_lr_state functions, as we only need to clear this bit if we had used all the LRs, so this is as good a place as any to do that work.

Reviewed-by: Marc Zyngier <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
1 parent 90cac1f commit af06149
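For orientation, here is a minimal standalone sketch (user-space C, not kernel code) of the check this commit introduces: whether the guest EOI'ed the interrupt in a list register is derived directly from the LR value read back on exit, instead of from the MISR/EISR status registers. The GICH_LR_* constants are the GICv2 definitions from include/linux/irqchip/arm-gic.h; the main() harness and its sample LR value are illustrative assumptions only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* GICv2 list-register bits, as defined in include/linux/irqchip/arm-gic.h */
#define GICH_LR_VIRTUALID (0x3ff << 0)  /* vINTID lives in bits [9:0] */
#define GICH_LR_EOI       (1 << 19)     /* EOI maintenance IRQ requested */
#define GICH_LR_STATE     (3 << 28)     /* pending (bit 28) | active (bit 29) */
#define GICH_LR_HW        (1U << 31)    /* LR linked to a hardware IRQ */

/*
 * Mirrors the new lr_signals_eoi_mi(): an LR that has become invalid
 * (state bits clear), that asked for an EOI maintenance interrupt, and
 * that is not linked to a hardware IRQ must have been EOI'ed by the
 * guest; this is the same fact the removed EISR scan recovered from the
 * status registers.
 */
static bool lr_signals_eoi_mi(uint32_t lr_val)
{
        return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
               !(lr_val & GICH_LR_HW);
}

int main(void)
{
        /* Hypothetical LR read back after a guest exit: vINTID 48, EOI'ed */
        uint32_t lr = 48 | GICH_LR_EOI;

        if (lr_signals_eoi_mi(lr))
                printf("vINTID %u EOI'ed; signal the notifyfd\n",
                       (unsigned)(lr & GICH_LR_VIRTUALID));
        return 0;
}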

File tree

4 files changed, 25 insertions(+), 96 deletions(-)


virt/kvm/arm/vgic/vgic-v2.c

Lines changed: 12 additions & 47 deletions
@@ -22,59 +22,17 @@
 
 #include "vgic.h"
 
-/*
- * Call this function to convert a u64 value to an unsigned long * bitmask
- * in a way that works on both 32-bit and 64-bit LE and BE platforms.
- *
- * Warning: Calling this function may modify *val.
- */
-static unsigned long *u64_to_bitmask(u64 *val)
-{
-#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
-        *val = (*val >> 32) | (*val << 32);
-#endif
-        return (unsigned long *)val;
-}
-
-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
         struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
 
-        if (cpuif->vgic_misr & GICH_MISR_EOI) {
-                u64 eisr = cpuif->vgic_eisr;
-                unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
-                int lr;
-
-                for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
-                        u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;
-
-                        WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
-
-                        /* Only SPIs require notification */
-                        if (vgic_valid_spi(vcpu->kvm, intid))
-                                kvm_notify_acked_irq(vcpu->kvm, 0,
-                                                     intid - VGIC_NR_PRIVATE_IRQS);
-                }
-        }
-
-        /* check and disable underflow maintenance IRQ */
-        cpuif->vgic_hcr &= ~GICH_HCR_UIE;
-
-        /*
-         * In the next iterations of the vcpu loop, if we sync the
-         * vgic state after flushing it, but before entering the guest
-         * (this happens for pending signals and vmid rollovers), then
-         * make sure we don't pick up any old maintenance interrupts
-         * here.
-         */
-        cpuif->vgic_eisr = 0;
+        cpuif->vgic_hcr |= GICH_HCR_UIE;
 }
 
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+static bool lr_signals_eoi_mi(u32 lr_val)
 {
-        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
-        cpuif->vgic_hcr |= GICH_HCR_UIE;
+        return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
+               !(lr_val & GICH_LR_HW);
 }
 
 /*
@@ -89,11 +47,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
         struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
         int lr;
 
+        cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+
         for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
                 u32 val = cpuif->vgic_lr[lr];
                 u32 intid = val & GICH_LR_VIRTUALID;
                 struct vgic_irq *irq;
 
+                /* Notify fds when the guest EOI'ed a level-triggered SPI */
+                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+                        kvm_notify_acked_irq(vcpu->kvm, 0,
+                                             intid - VGIC_NR_PRIVATE_IRQS);
+
                 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
                 spin_lock(&irq->irq_lock);

virt/kvm/arm/vgic/vgic-v3.c

Lines changed: 13 additions & 38 deletions
@@ -21,50 +21,17 @@
 
 #include "vgic.h"
 
-void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
         struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-        u32 model = vcpu->kvm->arch.vgic.vgic_model;
-
-        if (cpuif->vgic_misr & ICH_MISR_EOI) {
-                unsigned long eisr_bmap = cpuif->vgic_eisr;
-                int lr;
-
-                for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
-                        u32 intid;
-                        u64 val = cpuif->vgic_lr[lr];
-
-                        if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
-                                intid = val & ICH_LR_VIRTUAL_ID_MASK;
-                        else
-                                intid = val & GICH_LR_VIRTUALID;
-
-                        WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
-
-                        /* Only SPIs require notification */
-                        if (vgic_valid_spi(vcpu->kvm, intid))
-                                kvm_notify_acked_irq(vcpu->kvm, 0,
-                                                     intid - VGIC_NR_PRIVATE_IRQS);
-                }
-
-                /*
-                 * In the next iterations of the vcpu loop, if we sync
-                 * the vgic state after flushing it, but before
-                 * entering the guest (this happens for pending
-                 * signals and vmid rollovers), then make sure we
-                 * don't pick up any old maintenance interrupts here.
-                 */
-                cpuif->vgic_eisr = 0;
-        }
 
-        cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+        cpuif->vgic_hcr |= ICH_HCR_UIE;
 }
 
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+static bool lr_signals_eoi_mi(u64 lr_val)
 {
-        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
-        cpuif->vgic_hcr |= ICH_HCR_UIE;
+        return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
+               !(lr_val & ICH_LR_HW);
 }
 
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -73,6 +40,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
         u32 model = vcpu->kvm->arch.vgic.vgic_model;
         int lr;
 
+        cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+
         for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
                 u64 val = cpuif->vgic_lr[lr];
                 u32 intid;
@@ -82,6 +51,12 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                         intid = val & ICH_LR_VIRTUAL_ID_MASK;
                 else
                         intid = val & GICH_LR_VIRTUALID;
+
+                /* Notify fds when the guest EOI'ed a level-triggered IRQ */
+                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+                        kvm_notify_acked_irq(vcpu->kvm, 0,
+                                             intid - VGIC_NR_PRIVATE_IRQS);
+
                 irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
                 if (!irq)  /* An LPI could have been unmapped. */
                         continue;

virt/kvm/arm/vgic/vgic.c

Lines changed: 0 additions & 9 deletions
@@ -527,14 +527,6 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
         spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
-static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
-{
-        if (kvm_vgic_global_state.type == VGIC_V2)
-                vgic_v2_process_maintenance(vcpu);
-        else
-                vgic_v3_process_maintenance(vcpu);
-}
-
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 {
         if (kvm_vgic_global_state.type == VGIC_V2)
@@ -644,7 +636,6 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
         if (unlikely(!vgic_initialized(vcpu->kvm)))
                 return;
 
-        vgic_process_maintenance_interrupt(vcpu);
         vgic_fold_lr_state(vcpu);
         vgic_prune_ap_list(vcpu);

virt/kvm/arm/vgic/vgic.h

Lines changed: 0 additions & 2 deletions
@@ -112,7 +112,6 @@ void vgic_kick_vcpus(struct kvm *kvm);
 int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
                       phys_addr_t addr, phys_addr_t alignment);
 
-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu);
 void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
@@ -141,7 +140,6 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
         kref_get(&irq->refcount);
 }
 
-void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu);
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
