Skip to content

Commit 5369290

Browse files
author
Marc Zyngier
committed
KVM: arm/arm64: vgic: Fix source vcpu issues for GICv2 SGI
Now that we make sure we don't inject multiple instances of the same GICv2 SGI at the same time, we've made another bug more obvious: If we exit with an active SGI, we completely lose track of which vcpu it came from. On the next entry, we restore it with 0 as a source, and if that wasn't the right one, too bad. While this doesn't seem to trouble GIC-400, the architectural model gets offended and doesn't deactivate the interrupt on EOI. Another connected issue is that we will happily make pending an interrupt from another vcpu, overriding the above zero with something that is just as inconsistent. Don't do that. The final issue is that we signal a maintenance interrupt when no pending interrupts are present in the LR. Assuming we've fixed the two issues above, we end up in a situation where we keep exiting as soon as we've reached the active state, and are not able to inject the following pending interrupt. The fix comes in 3 parts: - GICv2 SGIs have their source vcpu saved if they are active on exit, and restored on entry - Multi-SGIs cannot go via the Pending+Active state, as this would corrupt the source field - Multi-SGIs are converted to using MI on EOI instead of NPIE Fixes: 16ca6a6 ("KVM: arm/arm64: vgic: Don't populate multiple LRs with the same vintid") Reported-by: Mark Rutland <[email protected]> Tested-by: Mark Rutland <[email protected]> Reviewed-by: Christoffer Dall <[email protected]> Signed-off-by: Marc Zyngier <[email protected]>
1 parent 85bd0ba commit 5369290

File tree

6 files changed

+81
-61
lines changed

6 files changed

+81
-61
lines changed

include/kvm/arm_vgic.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -131,6 +131,7 @@ struct vgic_irq {
131131
u32 mpidr; /* GICv3 target VCPU */
132132
};
133133
u8 source; /* GICv2 SGIs only */
134+
u8 active_source; /* GICv2 SGIs only */
134135
u8 priority;
135136
enum vgic_irq_config config; /* Level or edge */
136137

virt/kvm/arm/vgic/vgic-mmio.c

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -289,10 +289,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
289289
irq->vcpu->cpu != -1) /* VCPU thread is running */
290290
cond_resched_lock(&irq->irq_lock);
291291

292-
if (irq->hw)
292+
if (irq->hw) {
293293
vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
294-
else
294+
} else {
295+
u32 model = vcpu->kvm->arch.vgic.vgic_model;
296+
295297
irq->active = active;
298+
if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
299+
active && vgic_irq_is_sgi(irq->intid))
300+
irq->active_source = requester_vcpu->vcpu_id;
301+
}
296302

297303
if (irq->active)
298304
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

virt/kvm/arm/vgic/vgic-v2.c

Lines changed: 22 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -37,13 +37,6 @@ void vgic_v2_init_lrs(void)
3737
vgic_v2_write_lr(i, 0);
3838
}
3939

40-
void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
41-
{
42-
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
43-
44-
cpuif->vgic_hcr |= GICH_HCR_NPIE;
45-
}
46-
4740
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
4841
{
4942
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -71,13 +64,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
7164
int lr;
7265
unsigned long flags;
7366

74-
cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
67+
cpuif->vgic_hcr &= ~GICH_HCR_UIE;
7568

7669
for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
7770
u32 val = cpuif->vgic_lr[lr];
78-
u32 intid = val & GICH_LR_VIRTUALID;
71+
u32 cpuid, intid = val & GICH_LR_VIRTUALID;
7972
struct vgic_irq *irq;
8073

74+
/* Extract the source vCPU id from the LR */
75+
cpuid = val & GICH_LR_PHYSID_CPUID;
76+
cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
77+
cpuid &= 7;
78+
8179
/* Notify fds when the guest EOI'ed a level-triggered SPI */
8280
if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
8381
kvm_notify_acked_irq(vcpu->kvm, 0,
@@ -90,17 +88,16 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
9088
/* Always preserve the active bit */
9189
irq->active = !!(val & GICH_LR_ACTIVE_BIT);
9290

91+
if (irq->active && vgic_irq_is_sgi(intid))
92+
irq->active_source = cpuid;
93+
9394
/* Edge is the only case where we preserve the pending bit */
9495
if (irq->config == VGIC_CONFIG_EDGE &&
9596
(val & GICH_LR_PENDING_BIT)) {
9697
irq->pending_latch = true;
9798

98-
if (vgic_irq_is_sgi(intid)) {
99-
u32 cpuid = val & GICH_LR_PHYSID_CPUID;
100-
101-
cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
99+
if (vgic_irq_is_sgi(intid))
102100
irq->source |= (1 << cpuid);
103-
}
104101
}
105102

106103
/*
@@ -152,8 +149,15 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
152149
u32 val = irq->intid;
153150
bool allow_pending = true;
154151

155-
if (irq->active)
152+
if (irq->active) {
156153
val |= GICH_LR_ACTIVE_BIT;
154+
if (vgic_irq_is_sgi(irq->intid))
155+
val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
156+
if (vgic_irq_is_multi_sgi(irq)) {
157+
allow_pending = false;
158+
val |= GICH_LR_EOI;
159+
}
160+
}
157161

158162
if (irq->hw) {
159163
val |= GICH_LR_HW;
@@ -190,8 +194,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
190194
BUG_ON(!src);
191195
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
192196
irq->source &= ~(1 << (src - 1));
193-
if (irq->source)
197+
if (irq->source) {
194198
irq->pending_latch = true;
199+
val |= GICH_LR_EOI;
200+
}
195201
}
196202
}
197203

virt/kvm/arm/vgic/vgic-v3.c

Lines changed: 29 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,6 @@ static bool group1_trap;
2727
static bool common_trap;
2828
static bool gicv4_enable;
2929

30-
void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
31-
{
32-
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
33-
34-
cpuif->vgic_hcr |= ICH_HCR_NPIE;
35-
}
36-
3730
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
3831
{
3932
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -55,17 +48,23 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
5548
int lr;
5649
unsigned long flags;
5750

58-
cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
51+
cpuif->vgic_hcr &= ~ICH_HCR_UIE;
5952

6053
for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
6154
u64 val = cpuif->vgic_lr[lr];
62-
u32 intid;
55+
u32 intid, cpuid;
6356
struct vgic_irq *irq;
57+
bool is_v2_sgi = false;
6458

65-
if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
59+
cpuid = val & GICH_LR_PHYSID_CPUID;
60+
cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
61+
62+
if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
6663
intid = val & ICH_LR_VIRTUAL_ID_MASK;
67-
else
64+
} else {
6865
intid = val & GICH_LR_VIRTUALID;
66+
is_v2_sgi = vgic_irq_is_sgi(intid);
67+
}
6968

7069
/* Notify fds when the guest EOI'ed a level-triggered IRQ */
7170
if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
@@ -81,18 +80,16 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
8180
/* Always preserve the active bit */
8281
irq->active = !!(val & ICH_LR_ACTIVE_BIT);
8382

83+
if (irq->active && is_v2_sgi)
84+
irq->active_source = cpuid;
85+
8486
/* Edge is the only case where we preserve the pending bit */
8587
if (irq->config == VGIC_CONFIG_EDGE &&
8688
(val & ICH_LR_PENDING_BIT)) {
8789
irq->pending_latch = true;
8890

89-
if (vgic_irq_is_sgi(intid) &&
90-
model == KVM_DEV_TYPE_ARM_VGIC_V2) {
91-
u32 cpuid = val & GICH_LR_PHYSID_CPUID;
92-
93-
cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
91+
if (is_v2_sgi)
9492
irq->source |= (1 << cpuid);
95-
}
9693
}
9794

9895
/*
@@ -133,10 +130,20 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
133130
{
134131
u32 model = vcpu->kvm->arch.vgic.vgic_model;
135132
u64 val = irq->intid;
136-
bool allow_pending = true;
133+
bool allow_pending = true, is_v2_sgi;
137134

138-
if (irq->active)
135+
is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
136+
model == KVM_DEV_TYPE_ARM_VGIC_V2);
137+
138+
if (irq->active) {
139139
val |= ICH_LR_ACTIVE_BIT;
140+
if (is_v2_sgi)
141+
val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
142+
if (vgic_irq_is_multi_sgi(irq)) {
143+
allow_pending = false;
144+
val |= ICH_LR_EOI;
145+
}
146+
}
140147

141148
if (irq->hw) {
142149
val |= ICH_LR_HW;
@@ -174,8 +181,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
174181
BUG_ON(!src);
175182
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
176183
irq->source &= ~(1 << (src - 1));
177-
if (irq->source)
184+
if (irq->source) {
178185
irq->pending_latch = true;
186+
val |= ICH_LR_EOI;
187+
}
179188
}
180189
}
181190

virt/kvm/arm/vgic/vgic.c

Lines changed: 7 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -719,14 +719,6 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
719719
vgic_v3_set_underflow(vcpu);
720720
}
721721

722-
static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
723-
{
724-
if (kvm_vgic_global_state.type == VGIC_V2)
725-
vgic_v2_set_npie(vcpu);
726-
else
727-
vgic_v3_set_npie(vcpu);
728-
}
729-
730722
/* Requires the ap_list_lock to be held. */
731723
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
732724
bool *multi_sgi)
@@ -740,17 +732,15 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
740732
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
741733

742734
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
735+
int w;
736+
743737
spin_lock(&irq->irq_lock);
744738
/* GICv2 SGIs can count for more than one... */
745-
if (vgic_irq_is_sgi(irq->intid) && irq->source) {
746-
int w = hweight8(irq->source);
747-
748-
count += w;
749-
*multi_sgi |= (w > 1);
750-
} else {
751-
count++;
752-
}
739+
w = vgic_irq_get_lr_count(irq);
753740
spin_unlock(&irq->irq_lock);
741+
742+
count += w;
743+
*multi_sgi |= (w > 1);
754744
}
755745
return count;
756746
}
@@ -761,7 +751,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
761751
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
762752
struct vgic_irq *irq;
763753
int count;
764-
bool npie = false;
765754
bool multi_sgi;
766755
u8 prio = 0xff;
767756

@@ -791,10 +780,8 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
791780
if (likely(vgic_target_oracle(irq) == vcpu)) {
792781
vgic_populate_lr(vcpu, irq, count++);
793782

794-
if (irq->source) {
795-
npie = true;
783+
if (irq->source)
796784
prio = irq->priority;
797-
}
798785
}
799786

800787
spin_unlock(&irq->irq_lock);
@@ -807,9 +794,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
807794
}
808795
}
809796

810-
if (npie)
811-
vgic_set_npie(vcpu);
812-
813797
vcpu->arch.vgic_cpu.used_lrs = count;
814798

815799
/* Nuke remaining LRs */

virt/kvm/arm/vgic/vgic.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,20 @@ static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
110110
return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
111111
}
112112

113+
static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
114+
{
115+
/* Account for the active state as an interrupt */
116+
if (vgic_irq_is_sgi(irq->intid) && irq->source)
117+
return hweight8(irq->source) + irq->active;
118+
119+
return irq_is_pending(irq) || irq->active;
120+
}
121+
122+
static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
123+
{
124+
return vgic_irq_get_lr_count(irq) > 1;
125+
}
126+
113127
/*
114128
* This struct provides an intermediate representation of the fields contained
115129
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC

0 commit comments

Comments
 (0)