@@ -5721,9 +5721,6 @@ static void sched_tick_remote(struct work_struct *work)
 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
 	int cpu = twork->cpu;
 	struct rq *rq = cpu_rq(cpu);
-	struct task_struct *curr;
-	struct rq_flags rf;
-	u64 delta;
 	int os;
 
 	/*
@@ -5733,30 +5730,26 @@ static void sched_tick_remote(struct work_struct *work)
 	 * statistics and checks timeslices in a time-independent way, regardless
 	 * of when exactly it is running.
 	 */
-	if (!tick_nohz_tick_stopped_cpu(cpu))
-		goto out_requeue;
+	if (tick_nohz_tick_stopped_cpu(cpu)) {
+		guard(rq_lock_irq)(rq);
+		struct task_struct *curr = rq->curr;
 
-	rq_lock_irq(rq, &rf);
-	curr = rq->curr;
-	if (cpu_is_offline(cpu))
-		goto out_unlock;
+		if (cpu_online(cpu)) {
+			update_rq_clock(rq);
 
-	update_rq_clock(rq);
+			if (!is_idle_task(curr)) {
+				/*
+				 * Make sure the next tick runs within a
+				 * reasonable amount of time.
+				 */
+				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
+				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+			}
+			curr->sched_class->task_tick(rq, curr, 0);
 
-	if (!is_idle_task(curr)) {
-		/*
-		 * Make sure the next tick runs within a reasonable
-		 * amount of time.
-		 */
-		delta = rq_clock_task(rq) - curr->se.exec_start;
-		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+			calc_load_nohz_remote(rq);
+		}
 	}
-	curr->sched_class->task_tick(rq, curr, 0);
-
-	calc_load_nohz_remote(rq);
-out_unlock:
-	rq_unlock_irq(rq, &rf);
-out_requeue:
 
 	/*
 	 * Run the remote tick once per second (1Hz). This arbitrary
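For readers unfamiliar with the `guard(rq_lock_irq)(rq)` line introduced above: it comes from the kernel's scope-based cleanup infrastructure in include/linux/cleanup.h, which uses the compiler's `__attribute__((cleanup()))` to run the matching unlock automatically when the guard variable goes out of scope. That is what lets the patch delete the `out_unlock`/`out_requeue` labels and the explicit `rq_unlock_irq()` call: leaving the `if (tick_nohz_tick_stopped_cpu(cpu))` block drops the rq lock on every path, including the `cpu_online()` failure path that previously needed a goto. The following user-space sketch only mimics the mechanism; the names (`mutex_guard`, `bump`) are illustrative, not the kernel's, and the real rq-lock guard classes are generated from the rq_lock helpers in kernel/sched/sched.h.

#include <pthread.h>

/* Guard object: remembers which lock to release on scope exit. */
struct mutex_guard { pthread_mutex_t *lock; };

/* Constructor: acquire the lock and return the armed guard. */
static inline struct mutex_guard mutex_guard_init(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (struct mutex_guard){ .lock = lock };
}

/* Destructor: the compiler emits a call to this wherever the scope ends. */
static inline void mutex_guard_exit(struct mutex_guard *g)
{
	pthread_mutex_unlock(g->lock);
}

/* Rough analogue of guard(type)(arg) from include/linux/cleanup.h. */
#define mutex_guard(_lock)						\
	struct mutex_guard __guard					\
	__attribute__((cleanup(mutex_guard_exit))) = mutex_guard_init(_lock)

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void bump(int limit)
{
	mutex_guard(&m);	/* lock taken here ...                    */

	if (counter >= limit)
		return;		/* ... dropped on this early return ... */
	counter++;
}				/* ... or at the closing brace.          */

The same scoping explains why the new code no longer needs `curr`, `rf`, or `delta` at function scope: `curr` and the implicit guard live only inside the tick-stopped block, and everything after that block (the 1Hz requeue below) runs unlocked, exactly as it previously did after `out_requeue:`.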