@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
 	spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+	/*
+	 * We cannot directly run clocksource_watchdog_kthread() here, because
+	 * clocksource_select() calls timekeeping_notify() which uses
+	 * stop_machine(). One cannot use stop_machine() from a workqueue() due
+	 * to lock inversions wrt CPU hotplug.
+	 *
+	 * Also, we only ever run this work once or twice during the lifetime
+	 * of the kernel, so there is no point in creating a more permanent
+	 * kthread for this.
+	 *
+	 * If kthread_run fails the next watchdog scan over the
+	 * watchdog_list will find the unstable clock again.
+	 */
+	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
 	/*
-	 * If the clocksource is registered clocksource_watchdog_work() will
+	 * If the clocksource is registered clocksource_watchdog_kthread() will
 	 * re-rate and re-select.
 	 */
 	if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
-	/* kick clocksource_watchdog_work() */
+	/* kick clocksource_watchdog_kthread() */
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs: clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 	}
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
 	return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
 	mutex_lock(&clocksource_mutex);
-	if (__clocksource_watchdog_work())
+	if (__clocksource_watchdog_kthread())
 		clocksource_select();
 	mutex_unlock(&clocksource_mutex);
+	return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
 	/*
 	 * Run the watchdog first to eliminate unstable clock sources
 	 */
-	__clocksource_watchdog_work();
+	__clocksource_watchdog_kthread();
 	clocksource_select();
 	mutex_unlock(&clocksource_mutex);
 	return 0;
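
For readers unfamiliar with the pattern used above: the new clocksource_watchdog_work() exists only to hand the real work off to a short-lived kthread, because a workqueue worker must not end up in a stop_machine() path (lock inversion against CPU hotplug), while a freshly spawned kthread may. Below is a minimal, self-contained sketch of that hand-off as a standalone module, not part of the patch; everything named demo_* is hypothetical illustration, and only DECLARE_WORK(), schedule_work() and kthread_run() are real kernel APIs from <linux/workqueue.h> and <linux/kthread.h>.

/* Hypothetical demo_* module illustrating the work -> one-shot kthread hand-off. */
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

/* One-shot thread body: free to sleep, take mutexes, or reach code that
 * ends up in stop_machine(), unlike a workqueue worker. */
static int demo_kthread(void *data)
{
	pr_info("demo: heavy lifting runs in a dedicated kthread\n");
	return 0;
}

/* Work handler: does nothing but spawn the short-lived kthread and return.
 * As in the patch, a failed kthread_run() is not treated as fatal; the
 * condition will simply be noticed again the next time the work is queued. */
static void demo_work_fn(struct work_struct *work)
{
	kthread_run(demo_kthread, NULL, "demo_kthread");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/* In the patch this corresponds to schedule_work(&watchdog_work)
	 * from the point that detects an unstable clocksource. */
	schedule_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* A real module would also need to wait for the spawned kthread
	 * before unloading; omitted here for brevity. */
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");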