@@ -546,8 +546,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
 }
 
 /*
- * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
- * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next
+ * but does not set cpu_base::*expires_next, that is done by
+ * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
+ * cpu_base::*expires_next right away, reprogramming logic would no longer
+ * work.
  *
  * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
  * those timers will get run whenever the softirq gets handled, at the end of
@@ -588,6 +591,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
 	return expires_next;
 }
 
+static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
+{
+	ktime_t expires_next, soft = KTIME_MAX;
+
+	/*
+	 * If the soft interrupt has already been activated, ignore the
+	 * soft bases. They will be handled in the already raised soft
+	 * interrupt.
+	 */
+	if (!cpu_base->softirq_activated) {
+		soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
+		/*
+		 * Update the soft expiry time. clock_settime() might have
+		 * affected it.
+		 */
+		cpu_base->softirq_expires_next = soft;
+	}
+
+	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
+	/*
+	 * If a softirq timer is expiring first, update cpu_base->next_timer
+	 * and program the hardware with the soft expiry time.
+	 */
+	if (expires_next > soft) {
+		cpu_base->next_timer = cpu_base->softirq_next_timer;
+		expires_next = soft;
+	}
+
+	return expires_next;
+}
+
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 {
 	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
@@ -628,23 +662,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
 	ktime_t expires_next;
 
-	/*
-	 * Find the current next expiration time.
-	 */
-	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
-
-	if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
-		/*
-		 * When the softirq is activated, hrtimer has to be
-		 * programmed with the first hard hrtimer because soft
-		 * timer interrupt could occur too late.
-		 */
-		if (cpu_base->softirq_activated)
-			expires_next = __hrtimer_get_next_event(cpu_base,
-								HRTIMER_ACTIVE_HARD);
-		else
-			cpu_base->softirq_expires_next = expires_next;
-	}
+	expires_next = hrtimer_update_next_event(cpu_base);
 
 	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
@@ -1644,8 +1662,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
 
-	/* Reevaluate the clock bases for the next expiry */
-	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+	/* Reevaluate the clock bases for the [soft] next expiry */
+	expires_next = hrtimer_update_next_event(cpu_base);
 	/*
 	 * Store the new expiry value so the migration code can verify
 	 * against it.