Commit 2aae7bc

Peter Zijlstra authored and KAGA-KOKO committed
clocksource: Allow clocksource_mark_unstable() on unregistered clocksources
Because of how the code flips between tsc-early and tsc clocksources, it might need to mark one or both unstable. The current code in mark_tsc_unstable() only worked because previously it registered the tsc clocksource once and then never touched it.

Since it now unregisters the tsc-early clocksource, it needs to know if a clocksource got unregistered, and the current cs->mult test doesn't work for that. Instead, use list_empty(&cs->list) to test for registration.

Furthermore, since clocksource_mark_unstable() needs to place the cs on the wd_list, it links the cs->list and cs->wd_list serialization. It must not see a clocksource registered (!empty cs->list) but already past dequeue_watchdog(). So place {en,de}queue{,_watchdog}() under the same lock.

Provided cs->list is initialized to empty, this then allows us to unconditionally use clocksource_mark_unstable(), regardless of the registration state.

Fixes: aa83c45 ("x86/tsc: Introduce early tsc clocksource")
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Rafael J. Wysocki <[email protected]>
Tested-by: Diego Viola <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
1 parent e9088ad commit 2aae7bc

1 file changed (+34, -16)


kernel/time/clocksource.c

Lines changed: 34 additions & 16 deletions
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+        spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+        spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
@@ -142,6 +152,9 @@ static void __clocksource_unstable(struct clocksource *cs)
         cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
         cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
+        if (list_empty(&cs->list))
+                return;
+
         if (cs->mark_unstable)
                 cs->mark_unstable(cs);
 
@@ -164,7 +177,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 
         spin_lock_irqsave(&watchdog_lock, flags);
         if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
-                if (list_empty(&cs->wd_list))
+                if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
                         list_add(&cs->wd_list, &watchdog_list);
                 __clocksource_unstable(cs);
         }
@@ -319,9 +332,6 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&watchdog_lock, flags);
         if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                 /* cs is a clocksource to be watched. */
                 list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +341,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
                 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                         cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
         }
-        spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +382,6 @@ static void clocksource_select_watchdog(bool fallback)
 
 static void clocksource_dequeue_watchdog(struct clocksource *cs)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&watchdog_lock, flags);
         if (cs != watchdog) {
                 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                         /* cs is a watched clocksource. */
@@ -384,21 +390,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
                         clocksource_stop_watchdog();
                 }
         }
-        spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static int __clocksource_watchdog_kthread(void)
 {
         struct clocksource *cs, *tmp;
         unsigned long flags;
-        LIST_HEAD(unstable);
         int select = 0;
 
         spin_lock_irqsave(&watchdog_lock, flags);
         list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
                 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
                         list_del_init(&cs->wd_list);
-                        list_add(&cs->wd_list, &unstable);
+                        __clocksource_change_rating(cs, 0);
                         select = 1;
                 }
                 if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +414,6 @@ static int __clocksource_watchdog_kthread(void)
         clocksource_stop_watchdog();
         spin_unlock_irqrestore(&watchdog_lock, flags);
 
-        /* Needs to be done outside of watchdog lock */
-        list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
-                list_del_init(&cs->wd_list);
-                __clocksource_change_rating(cs, 0);
-        }
         return select;
 }
 
@@ -447,6 +446,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
@@ -779,14 +781,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  */
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+        unsigned long flags;
 
         /* Initialize mult/shift and max_idle_ns */
         __clocksource_update_freq_scale(cs, scale, freq);
 
         /* Add clocksource to the clocksource list */
         mutex_lock(&clocksource_mutex);
+
+        clocksource_watchdog_lock(&flags);
         clocksource_enqueue(cs);
         clocksource_enqueue_watchdog(cs);
+        clocksource_watchdog_unlock(&flags);
+
         clocksource_select();
         clocksource_select_watchdog(false);
         mutex_unlock(&clocksource_mutex);
@@ -808,8 +815,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
+        unsigned long flags;
+
         mutex_lock(&clocksource_mutex);
+        clocksource_watchdog_lock(&flags);
         __clocksource_change_rating(cs, rating);
+        clocksource_watchdog_unlock(&flags);
+
         clocksource_select();
         clocksource_select_watchdog(false);
         mutex_unlock(&clocksource_mutex);
@@ -821,6 +833,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
+        unsigned long flags;
+
         if (clocksource_is_watchdog(cs)) {
                 /* Select and try to install a replacement watchdog. */
                 clocksource_select_watchdog(true);
@@ -834,8 +848,12 @@ static int clocksource_unbind(struct clocksource *cs)
                 if (curr_clocksource == cs)
                         return -EBUSY;
         }
+
+        clocksource_watchdog_lock(&flags);
         clocksource_dequeue_watchdog(cs);
         list_del_init(&cs->list);
+        clocksource_watchdog_unlock(&flags);
+
         return 0;
 }
