@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+	spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
+	/*
+	 * If the clocksource is registered clocksource_watchdog_kthread() will
+	 * re-rate and re-select.
+	 */
+	if (list_empty(&cs->list)) {
+		cs->rating = 0;
+		return;
+	}
+
 	if (cs->mark_unstable)
 		cs->mark_unstable(cs);
 
+	/* kick clocksource_watchdog_kthread() */
 	if (finished_booting)
 		schedule_work(&watchdog_work);
 }
@@ -153,18 +173,16 @@ static void __clocksource_unstable(struct clocksource *cs)
  * clocksource_mark_unstable - mark clocksource unstable via watchdog
  * @cs:		clocksource to be marked unstable
  *
- * This function is called instead of clocksource_change_rating from
- * cpu hotplug code to avoid a deadlock between the clocksource mutex
- * and the cpu hotplug mutex. It defers the update of the clocksource
- * to the watchdog thread.
+ * This function is called by the x86 TSC code to mark clocksources as unstable;
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
-		if (list_empty(&cs->wd_list))
+		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
 			list_add(&cs->wd_list, &watchdog_list);
 		__clocksource_unstable(cs);
 	}
@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
-	unsigned long flags;
+	INIT_LIST_HEAD(&cs->wd_list);
 
-	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 		/* cs is a clocksource to be watched. */
 		list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 	}
-	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback)
 
 static void clocksource_dequeue_watchdog(struct clocksource *cs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&watchdog_lock, flags);
 	if (cs != watchdog) {
 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
 			/* cs is a watched clocksource. */
@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
 			clocksource_stop_watchdog();
 		}
 	}
-	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static int __clocksource_watchdog_kthread(void)
 {
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
-	LIST_HEAD(unstable);
 	int select = 0;
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
 			list_del_init(&cs->wd_list);
-			list_add(&cs->wd_list, &unstable);
+			__clocksource_change_rating(cs, 0);
 			select = 1;
 		}
 		if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void)
 	clocksource_stop_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 
-	/* Needs to be done outside of watchdog lock */
-	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
-		list_del_init(&cs->wd_list);
-		__clocksource_change_rating(cs, 0);
-	}
 
 	return select;
 }
@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  */
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+	unsigned long flags;
 
 	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_update_freq_scale(cs, scale, freq);
 
 	/* Add clocksource to the clocksource list */
 	mutex_lock(&clocksource_mutex);
+
+	clocksource_watchdog_lock(&flags);
 	clocksource_enqueue(cs);
 	clocksource_enqueue_watchdog(cs);
+	clocksource_watchdog_unlock(&flags);
+
 	clocksource_select();
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
+	unsigned long flags;
+
 	mutex_lock(&clocksource_mutex);
+	clocksource_watchdog_lock(&flags);
 	__clocksource_change_rating(cs, rating);
+	clocksource_watchdog_unlock(&flags);
+
 	clocksource_select();
 	clocksource_select_watchdog(false);
 	mutex_unlock(&clocksource_mutex);
@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
+	unsigned long flags;
+
 	if (clocksource_is_watchdog(cs)) {
 		/* Select and try to install a replacement watchdog. */
 		clocksource_select_watchdog(true);
@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs)
 		if (curr_clocksource == cs)
 			return -EBUSY;
 	}
+
+	clocksource_watchdog_lock(&flags);
 	clocksource_dequeue_watchdog(cs);
 	list_del_init(&cs->list);
+	clocksource_watchdog_unlock(&flags);
+
 	return 0;
 }
 
0 commit comments