@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
 	.irq	= 30,
 	.level	= 1,
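
The new `has_gic_active_state` key uses the kernel's jump-label machinery: `DEFINE_STATIC_KEY_FALSE` declares a key whose branch sites are patched in place when the key is flipped, so the check costs a patched jump on the hot path rather than a load and compare. A minimal sketch of the pattern, assuming kernel context; the key name and functions below are illustrative, not part of this patch:

#include <linux/jump_label.h>

/* Defaults to false; branch sites compile to a patchable jump. */
static DEFINE_STATIC_KEY_FALSE(example_key);

static void example_init(bool feature_present)
{
	/* Flipping the key once at init rewrites every branch site. */
	if (feature_present)
		static_branch_enable(&example_key);
}

static void example_hot_path(void)
{
	if (static_branch_likely(&example_key)) {
		/* fast path, taken once the key has been enabled */
	} else {
		/* fallback path */
	}
}

This mirrors how the patch uses the key: `kvm_timer_hyp_init()` enables it once when a GIC with active-state support is present, and the vcpu load/sync paths branch on it.
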
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
 	return timecounter->cc->read(timecounter->cc);
 }
 
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+	return static_branch_unlikely(&userspace_irqchip_in_use) &&
+		unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
 	cancel_work_sync(work);
 }
 
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-	/*
-	 * When using a userspace irqchip with the architected timers, we must
-	 * prevent continuously exiting from the guest, and therefore mask the
-	 * physical interrupt by disabling it on the host interrupt controller
-	 * when the virtual level is high, such that the guest can make
-	 * forward progress.  Once we detect the output level being
-	 * de-asserted, we unmask the interrupt again so that we exit from the
-	 * guest when the timer fires.
-	 */
-	if (vtimer->irq.level)
-		disable_percpu_irq(host_vtimer_irq);
-	else
-		enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	if (kvm_timer_should_fire(vtimer))
 		kvm_timer_update_irq(vcpu, true, vtimer);
 
-	if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-	    unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_vtimer_update_mask_user(vcpu);
+	if (userspace_irqchip(vcpu->kvm) &&
+	    !static_branch_unlikely(&has_gic_active_state))
+		disable_percpu_irq(host_vtimer_irq);
 
 	return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
 				   timer_ctx->irq.level);
 
-	if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-	    likely(irqchip_in_kernel(vcpu->kvm))) {
+	if (!userspace_irqchip(vcpu->kvm)) {
 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 					  timer_ctx->irq.irq,
 					  timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 		phys_timer_emulate(vcpu);
 }
 
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-	timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-	timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 	if (!vtimer->loaded)
 		goto out;
 
-	if (timer->enabled)
-		__timer_snapshot_state(vtimer);
+	if (timer->enabled) {
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+	}
 
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
 	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }
 
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+	int r;
+	r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+	WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	bool phys_active;
-	int ret;
 
-	phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
-
-	ret = irq_set_irqchip_state(host_vtimer_irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    phys_active);
-	WARN_ON(ret);
+	if (irqchip_in_kernel(vcpu->kvm))
+		phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+	else
+		phys_active = vtimer->irq.level;
+	set_vtimer_irq_phys_active(vcpu, phys_active);
 }
 
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-	kvm_vtimer_update_mask_user(vcpu);
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	/*
+	 * When using a userspace irqchip with the architected timers and a
+	 * host interrupt controller that doesn't support an active state, we
+	 * must still prevent continuously exiting from the guest, and
+	 * therefore mask the physical interrupt by disabling it on the host
+	 * interrupt controller when the virtual level is high, such that the
+	 * guest can make forward progress.  Once we detect the output level
+	 * being de-asserted, we unmask the interrupt again so that we exit
+	 * from the guest when the timer fires.
+	 */
+	if (vtimer->irq.level)
+		disable_percpu_irq(host_vtimer_irq);
+	else
+		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_timer_vcpu_load_user(vcpu);
+	if (static_branch_likely(&has_gic_active_state))
+		kvm_timer_vcpu_load_gic(vcpu);
 	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+		kvm_timer_vcpu_load_nogic(vcpu);
 
 	set_cntvoff(vtimer->cntvoff);
 
@@ -555,18 +559,24 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-		__timer_snapshot_state(vtimer);
-		if (!kvm_timer_should_fire(vtimer)) {
-			kvm_timer_update_irq(vcpu, false, vtimer);
-			kvm_vtimer_update_mask_user(vcpu);
-		}
+	if (!kvm_timer_should_fire(vtimer)) {
+		kvm_timer_update_irq(vcpu, false, vtimer);
+		if (static_branch_likely(&has_gic_active_state))
+			set_vtimer_irq_phys_active(vcpu, false);
+		else
+			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 	}
 }
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	unmask_vtimer_irq_user(vcpu);
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	if (unlikely(!timer->enabled))
+		return;
+
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -753,6 +763,8 @@ int kvm_timer_hyp_init(bool has_gic)
 			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 			goto out_free_irq;
 		}
+
+		static_branch_enable(&has_gic_active_state);
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
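
Taken together, the patch leaves two runtime strategies for keeping the physical vtimer quiet while the guest owns it. With a GIC that supports an active state, `irq_set_irqchip_state(..., IRQCHIP_STATE_ACTIVE, true)` marks the line active at the distributor, so the host sees no new timer interrupt until the state is cleared again (as in `unmask_vtimer_irq_user()` once the virtual line drops). Without one, the percpu IRQ is simply masked while the virtual line is high. A condensed, illustrative sketch of that dispatch; the helper name here is hypothetical, a paraphrase of the patch rather than a quote from the file:

/* Illustrative paraphrase of the dispatch the patch introduces;
 * vtimer_quiesce_host_irq() is not a function in the real file. */
static void vtimer_quiesce_host_irq(struct kvm_vcpu *vcpu, bool line_high)
{
	if (static_branch_likely(&has_gic_active_state)) {
		/* GIC path: while the line is marked active at the
		 * distributor, no host interrupt is delivered. */
		WARN_ON(irq_set_irqchip_state(host_vtimer_irq,
					      IRQCHIP_STATE_ACTIVE,
					      line_high));
	} else {
		/* Fallback: mask/unmask the percpu IRQ around the
		 * guest's ownership of the timer. */
		if (line_high)
			disable_percpu_irq(host_vtimer_irq);
		else
			enable_percpu_irq(host_vtimer_irq,
					  host_vtimer_irq_flags);
	}
}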