
Commit 6c62cc4

Merge tag 'kvm-arm-fixes-for-v4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/ARM Fixes for v4.16, Round 1: Fix the interaction of userspace irqchip VMs with in-kernel irqchip VMs and make sure we can build 32-bit KVM/ARM with gcc-8.
2 parents 722c2cd + 67870eb commit 6c62cc4

3 files changed: +73 additions, -52 deletions

arch/arm/kvm/hyp/Makefile

Lines changed: 5 additions & 0 deletions
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
 
+CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
+
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o

arch/arm/kvm/hyp/banked-sr.c

Lines changed: 4 additions & 0 deletions
@@ -20,6 +20,10 @@
 
 #include <asm/kvm_hyp.h>
 
+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
 __asm__(".arch_extension virt");
 
 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
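
For context, a minimal stand-alone sketch (not from the patch) of what the comment above describes: the ".arch_extension virt" directive tells the assembler to accept virtualization-extension instructions, such as banked-register MRS accesses, even when the compiler driver could not be given -march=armv7ve (e.g. gcc older than 4.9). The helper name read_elr_hyp below is made up for illustration and assumes a 32-bit ARM build.

/*
 * Illustrative sketch only: with the directive in place, inline asm may
 * use virtualization-extension instructions that gas would otherwise
 * reject when -march=armv7ve was not passed.
 */
__asm__(".arch_extension virt");

static inline unsigned long read_elr_hyp(void)
{
	unsigned long val;

	/* Banked-register MRS; only assembles with the virt extension. */
	asm volatile("mrs %0, ELR_hyp" : "=r" (val));
	return val;
}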

virt/kvm/arm/arch_timer.c

Lines changed: 64 additions & 52 deletions
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
 	.irq	= 30,
 	.level	= 1,
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
 	return timecounter->cc->read(timecounter->cc);
 }
 
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+	return static_branch_unlikely(&userspace_irqchip_in_use) &&
+		unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
 	cancel_work_sync(work);
 }
 
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-	/*
-	 * When using a userspace irqchip with the architected timers, we must
-	 * prevent continuously exiting from the guest, and therefore mask the
-	 * physical interrupt by disabling it on the host interrupt controller
-	 * when the virtual level is high, such that the guest can make
-	 * forward progress. Once we detect the output level being
-	 * de-asserted, we unmask the interrupt again so that we exit from the
-	 * guest when the timer fires.
-	 */
-	if (vtimer->irq.level)
-		disable_percpu_irq(host_vtimer_irq);
-	else
-		enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	if (kvm_timer_should_fire(vtimer))
 		kvm_timer_update_irq(vcpu, true, vtimer);
 
-	if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-	    unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_vtimer_update_mask_user(vcpu);
+	if (userspace_irqchip(vcpu->kvm) &&
+	    !static_branch_unlikely(&has_gic_active_state))
+		disable_percpu_irq(host_vtimer_irq);
 
 	return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
 				   timer_ctx->irq.level);
 
-	if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-	    likely(irqchip_in_kernel(vcpu->kvm))) {
+	if (!userspace_irqchip(vcpu->kvm)) {
 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 					  timer_ctx->irq.irq,
 					  timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 	phys_timer_emulate(vcpu);
 }
 
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-	timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-	timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 	if (!vtimer->loaded)
 		goto out;
 
-	if (timer->enabled)
-		__timer_snapshot_state(vtimer);
+	if (timer->enabled) {
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+	}
 
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
 	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }
 
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+	int r;
+	r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+	WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	bool phys_active;
-	int ret;
 
-	phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
-
-	ret = irq_set_irqchip_state(host_vtimer_irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    phys_active);
-	WARN_ON(ret);
+	if (irqchip_in_kernel(vcpu->kvm))
+		phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+	else
+		phys_active = vtimer->irq.level;
+	set_vtimer_irq_phys_active(vcpu, phys_active);
 }
 
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-	kvm_vtimer_update_mask_user(vcpu);
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	/*
+	 * When using a userspace irqchip with the architected timers and a
+	 * host interrupt controller that doesn't support an active state, we
+	 * must still prevent continuously exiting from the guest, and
+	 * therefore mask the physical interrupt by disabling it on the host
+	 * interrupt controller when the virtual level is high, such that the
+	 * guest can make forward progress. Once we detect the output level
+	 * being de-asserted, we unmask the interrupt again so that we exit
+	 * from the guest when the timer fires.
+	 */
+	if (vtimer->irq.level)
+		disable_percpu_irq(host_vtimer_irq);
+	else
+		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_timer_vcpu_load_user(vcpu);
+	if (static_branch_likely(&has_gic_active_state))
+		kvm_timer_vcpu_load_gic(vcpu);
 	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+		kvm_timer_vcpu_load_nogic(vcpu);
 
 	set_cntvoff(vtimer->cntvoff);
 
@@ -555,18 +559,24 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-		__timer_snapshot_state(vtimer);
-		if (!kvm_timer_should_fire(vtimer)) {
-			kvm_timer_update_irq(vcpu, false, vtimer);
-			kvm_vtimer_update_mask_user(vcpu);
-		}
+	if (!kvm_timer_should_fire(vtimer)) {
+		kvm_timer_update_irq(vcpu, false, vtimer);
+		if (static_branch_likely(&has_gic_active_state))
+			set_vtimer_irq_phys_active(vcpu, false);
+		else
+			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 	}
 }
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	unmask_vtimer_irq_user(vcpu);
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	if (unlikely(!timer->enabled))
+		return;
+
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -753,6 +763,8 @@ int kvm_timer_hyp_init(bool has_gic)
 			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 			goto out_free_irq;
 		}
+
+		static_branch_enable(&has_gic_active_state);
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
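
The new has_gic_active_state key is flipped once at init time (the static_branch_enable() call in kvm_timer_hyp_init() above) and then queried on the hot paths with static_branch_likely()/static_branch_unlikely(). As a rough illustration of the generic static-key pattern from <linux/jump_label.h>, with made-up names (example_feature, example_init, example_hot_path) rather than anything from this patch:

#include <linux/jump_label.h>

/* Illustrative key; the real one in this commit is has_gic_active_state. */
static DEFINE_STATIC_KEY_FALSE(example_feature);

void example_init(bool feature_present)
{
	/* Patched exactly once, typically during subsystem init. */
	if (feature_present)
		static_branch_enable(&example_feature);
}

void example_hot_path(void)
{
	if (static_branch_likely(&example_feature)) {
		/* Common case: the branch is patched to fall through. */
	} else {
		/* Fallback when the feature was never enabled. */
	}
}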
