Skip to content

Commit 61374cc

Browse files
committed
Merge tag 'kvmarm-fixes-6.16-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 6.16, take #1 - Make the irqbypass hooks resilient to changes in the GSI<->MSI routing, avoiding stale vLPI mappings being left behind. The fix is to resolve the VGIC IRQ using the host IRQ (which is stable) and nuking the vLPI mapping upon a routing change. - Close another VGIC race where vCPU creation races with VGIC creation, leading to in-flight vCPUs entering the kernel w/o private IRQs allocated. - Fix a build issue triggered by the recently added workaround for Ampere's AC04_CPU_23 erratum. - Correctly sign-extend the VA when emulating a TLBI instruction potentially targeting a VNCR mapping. - Avoid dereferencing a NULL pointer in the VGIC debug code, which can happen if the device doesn't have any mapping yet.
2 parents 438e228 + 4d62121 commit 61374cc

File tree

8 files changed

+134
-74
lines changed

8 files changed

+134
-74
lines changed

arch/arm64/include/asm/sysreg.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
#include <linux/bits.h>
1313
#include <linux/stringify.h>
1414
#include <linux/kasan-tags.h>
15+
#include <linux/kconfig.h>
1516

1617
#include <asm/gpr-num.h>
1718

arch/arm64/kvm/arm.c

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2747,6 +2747,7 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
27472747
return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
27482748
&irqfd->irq_entry);
27492749
}
2750+
27502751
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
27512752
struct irq_bypass_producer *prod)
27522753
{
@@ -2757,8 +2758,29 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
27572758
if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
27582759
return;
27592760

2760-
kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
2761-
&irqfd->irq_entry);
2761+
kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq);
2762+
}
2763+
2764+
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
2765+
struct kvm_kernel_irq_routing_entry *new)
2766+
{
2767+
if (new->type != KVM_IRQ_ROUTING_MSI)
2768+
return true;
2769+
2770+
return memcmp(&old->msi, &new->msi, sizeof(new->msi));
2771+
}
2772+
2773+
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
2774+
uint32_t guest_irq, bool set)
2775+
{
2776+
/*
2777+
* Remapping the vLPI requires taking the its_lock mutex to resolve
2778+
* the new translation. We're in spinlock land at this point, so no
2779+
* chance of resolving the translation.
2780+
*
2781+
* Unmap the vLPI and fall back to software LPI injection.
2782+
*/
2783+
return kvm_vgic_v4_unset_forwarding(kvm, host_irq);
27622784
}
27632785

27642786
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)

arch/arm64/kvm/nested.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -918,6 +918,8 @@ static void invalidate_vncr_va(struct kvm *kvm,
918918
}
919919
}
920920

921+
#define tlbi_va_s1_to_va(v) (u64)sign_extend64((v) << 12, 48)
922+
921923
static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
922924
struct s1e2_tlbi_scope *scope)
923925
{
@@ -964,7 +966,7 @@ static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
964966
scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
965967
if (!scope->size)
966968
scope->size = SZ_1G;
967-
scope->va = (val << 12) & ~(scope->size - 1);
969+
scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
968970
scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
969971
break;
970972
case OP_TLBI_ASIDE1:
@@ -992,7 +994,7 @@ static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
992994
scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
993995
if (!scope->size)
994996
scope->size = SZ_1G;
995-
scope->va = (val << 12) & ~(scope->size - 1);
997+
scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
996998
break;
997999
case OP_TLBI_RVAE2:
9981000
case OP_TLBI_RVAE2IS:

arch/arm64/kvm/vgic/vgic-debug.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -490,6 +490,9 @@ static int vgic_its_debug_show(struct seq_file *s, void *v)
490490
struct its_device *dev = iter->dev;
491491
struct its_ite *ite = iter->ite;
492492

493+
if (!ite)
494+
return 0;
495+
493496
if (list_is_first(&ite->ite_list, &dev->itt_head)) {
494497
seq_printf(s, "\n");
495498
seq_printf(s, "Device ID: 0x%x, Event ID Range: [0 - %llu]\n",
@@ -498,7 +501,7 @@ static int vgic_its_debug_show(struct seq_file *s, void *v)
498501
seq_printf(s, "-----------------------------------------------\n");
499502
}
500503

501-
if (ite && ite->irq && ite->collection) {
504+
if (ite->irq && ite->collection) {
502505
seq_printf(s, "%8u %8u %8u %8u %8u %2d\n",
503506
ite->event_id, ite->irq->intid, ite->irq->hwintid,
504507
ite->collection->target_addr,

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,15 +84,40 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
8484
!kvm_vgic_global_state.can_emulate_gicv2)
8585
return -ENODEV;
8686

87-
/* Must be held to avoid race with vCPU creation */
87+
/*
88+
* Ensure mutual exclusion with vCPU creation and any vCPU ioctls by:
89+
*
90+
* - Holding kvm->lock to prevent KVM_CREATE_VCPU from reaching
91+
* kvm_arch_vcpu_precreate() and ensuring created_vcpus is stable.
92+
* This alone is insufficient, as kvm_vm_ioctl_create_vcpu() drops
93+
* the kvm->lock before completing the vCPU creation.
94+
*/
8895
lockdep_assert_held(&kvm->lock);
8996

97+
/*
98+
* - Acquiring the vCPU mutex for every *online* vCPU to prevent
99+
* concurrent vCPU ioctls for vCPUs already visible to userspace.
100+
*/
90101
ret = -EBUSY;
91102
if (kvm_trylock_all_vcpus(kvm))
92103
return ret;
93104

105+
/*
106+
* - Taking the config_lock which protects VGIC data structures such
107+
* as the per-vCPU arrays of private IRQs (SGIs, PPIs).
108+
*/
94109
mutex_lock(&kvm->arch.config_lock);
95110

111+
/*
112+
* - Bailing on the entire thing if a vCPU is in the middle of creation,
113+
* dropped the kvm->lock, but hasn't reached kvm_arch_vcpu_create().
114+
*
115+
* The whole combination of this guarantees that no vCPU can get into
116+
* KVM with a VGIC configuration inconsistent with the VM's VGIC.
117+
*/
118+
if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
119+
goto out_unlock;
120+
96121
if (irqchip_in_kernel(kvm)) {
97122
ret = -EEXIST;
98123
goto out_unlock;

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -306,39 +306,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
306306
}
307307
}
308308

309-
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
310-
311309
if (irq->hw)
312-
return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
310+
ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
313311

314-
return 0;
312+
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
313+
return ret;
315314
}
316315

317316
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
318317
{
319-
int ret = 0;
320-
unsigned long flags;
318+
struct its_vlpi_map map;
319+
int ret;
321320

322-
raw_spin_lock_irqsave(&irq->irq_lock, flags);
321+
guard(raw_spinlock_irqsave)(&irq->irq_lock);
323322
irq->target_vcpu = vcpu;
324-
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
325323

326-
if (irq->hw) {
327-
struct its_vlpi_map map;
328-
329-
ret = its_get_vlpi(irq->host_irq, &map);
330-
if (ret)
331-
return ret;
324+
if (!irq->hw)
325+
return 0;
332326

333-
if (map.vpe)
334-
atomic_dec(&map.vpe->vlpi_count);
335-
map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
336-
atomic_inc(&map.vpe->vlpi_count);
327+
ret = its_get_vlpi(irq->host_irq, &map);
328+
if (ret)
329+
return ret;
337330

338-
ret = its_map_vlpi(irq->host_irq, &map);
339-
}
331+
if (map.vpe)
332+
atomic_dec(&map.vpe->vlpi_count);
340333

341-
return ret;
334+
map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
335+
atomic_inc(&map.vpe->vlpi_count);
336+
return its_map_vlpi(irq->host_irq, &map);
342337
}
343338

344339
static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
@@ -756,12 +751,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
756751
/* Requires the its_lock to be held. */
757752
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
758753
{
754+
struct vgic_irq *irq = ite->irq;
759755
list_del(&ite->ite_list);
760756

761757
/* This put matches the get in vgic_add_lpi. */
762-
if (ite->irq) {
763-
if (ite->irq->hw)
764-
WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
758+
if (irq) {
759+
scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
760+
if (irq->hw)
761+
WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
762+
763+
irq->hw = false;
764+
}
765765

766766
vgic_put_irq(kvm, ite->irq);
767767
}

arch/arm64/kvm/vgic/vgic-v4.c

Lines changed: 50 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -444,7 +444,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
444444
if (IS_ERR(its))
445445
return 0;
446446

447-
mutex_lock(&its->its_lock);
447+
guard(mutex)(&its->its_lock);
448448

449449
/*
450450
* Perform the actual DevID/EventID -> LPI translation.
@@ -455,11 +455,13 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
455455
*/
456456
if (vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
457457
irq_entry->msi.data, &irq))
458-
goto out;
458+
return 0;
459+
460+
raw_spin_lock_irqsave(&irq->irq_lock, flags);
459461

460462
/* Silently exit if the vLPI is already mapped */
461463
if (irq->hw)
462-
goto out;
464+
goto out_unlock_irq;
463465

464466
/*
465467
* Emit the mapping request. If it fails, the ITS probably
@@ -479,68 +481,74 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
479481

480482
ret = its_map_vlpi(virq, &map);
481483
if (ret)
482-
goto out;
484+
goto out_unlock_irq;
483485

484486
irq->hw = true;
485487
irq->host_irq = virq;
486488
atomic_inc(&map.vpe->vlpi_count);
487489

488490
/* Transfer pending state */
489-
raw_spin_lock_irqsave(&irq->irq_lock, flags);
490-
if (irq->pending_latch) {
491-
ret = irq_set_irqchip_state(irq->host_irq,
492-
IRQCHIP_STATE_PENDING,
493-
irq->pending_latch);
494-
WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
491+
if (!irq->pending_latch)
492+
goto out_unlock_irq;
495493

496-
/*
497-
* Clear pending_latch and communicate this state
498-
* change via vgic_queue_irq_unlock.
499-
*/
500-
irq->pending_latch = false;
501-
vgic_queue_irq_unlock(kvm, irq, flags);
502-
} else {
503-
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
504-
}
494+
ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
495+
irq->pending_latch);
496+
WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
505497

506-
out:
507-
mutex_unlock(&its->its_lock);
498+
/*
499+
* Clear pending_latch and communicate this state
500+
* change via vgic_queue_irq_unlock.
501+
*/
502+
irq->pending_latch = false;
503+
vgic_queue_irq_unlock(kvm, irq, flags);
504+
return ret;
505+
506+
out_unlock_irq:
507+
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
508508
return ret;
509509
}
510510

511-
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
512-
struct kvm_kernel_irq_routing_entry *irq_entry)
511+
static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
513512
{
514-
struct vgic_its *its;
515513
struct vgic_irq *irq;
516-
int ret;
514+
unsigned long idx;
515+
516+
guard(rcu)();
517+
xa_for_each(&kvm->arch.vgic.lpi_xa, idx, irq) {
518+
if (!irq->hw || irq->host_irq != host_irq)
519+
continue;
520+
521+
if (!vgic_try_get_irq_kref(irq))
522+
return NULL;
523+
524+
return irq;
525+
}
526+
527+
return NULL;
528+
}
529+
530+
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
531+
{
532+
struct vgic_irq *irq;
533+
unsigned long flags;
534+
int ret = 0;
517535

518536
if (!vgic_supports_direct_msis(kvm))
519537
return 0;
520538

521-
/*
522-
* Get the ITS, and escape early on error (not a valid
523-
* doorbell for any of our vITSs).
524-
*/
525-
its = vgic_get_its(kvm, irq_entry);
526-
if (IS_ERR(its))
539+
irq = __vgic_host_irq_get_vlpi(kvm, host_irq);
540+
if (!irq)
527541
return 0;
528542

529-
mutex_lock(&its->its_lock);
530-
531-
ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
532-
irq_entry->msi.data, &irq);
533-
if (ret)
534-
goto out;
535-
536-
WARN_ON(irq->hw && irq->host_irq != virq);
543+
raw_spin_lock_irqsave(&irq->irq_lock, flags);
544+
WARN_ON(irq->hw && irq->host_irq != host_irq);
537545
if (irq->hw) {
538546
atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
539547
irq->hw = false;
540-
ret = its_unmap_vlpi(virq);
548+
ret = its_unmap_vlpi(host_irq);
541549
}
542550

543-
out:
544-
mutex_unlock(&its->its_lock);
551+
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
552+
vgic_put_irq(kvm, irq);
545553
return ret;
546554
}

include/kvm/arm_vgic.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -434,8 +434,7 @@ struct kvm_kernel_irq_routing_entry;
434434
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
435435
struct kvm_kernel_irq_routing_entry *irq_entry);
436436

437-
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
438-
struct kvm_kernel_irq_routing_entry *irq_entry);
437+
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
439438

440439
int vgic_v4_load(struct kvm_vcpu *vcpu);
441440
void vgic_v4_commit(struct kvm_vcpu *vcpu);

0 commit comments

Comments
 (0)