
Commit 3567994

Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timekeeping fixes from Thomas Gleixner:
 "Two fixes for timekeeping:

   - Revert to the previous kthread based update, which is unfortunately
     required due to lock ordering issues. The removal caused boot
     failures on old Core2 machines. Add a proper comment why the thread
     needs to stay to prevent accidental removal in the future.

   - Fix a silly typo in a function declaration"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource: Revert "Remove kthread"
  timekeeping: Fix declaration of read_persistent_wall_and_boot_offset()
2 parents: 225ad3c + e2c631b
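For context: what the revert restores is a workqueue handler whose only job is to spawn a one-shot kthread, because the real work ends up calling stop_machine(), which must not be invoked from workqueue context. A minimal sketch of that shape (illustrative only; the example_* names are made up here, and the actual code is in the kernel/time/clocksource.c hunks below):

    #include <linux/kthread.h>
    #include <linux/workqueue.h>

    static int example_kthread(void *data)
    {
            /* Heavy lifting that may reach stop_machine() runs here,
             * in kthread context where that is safe. */
            return 0;
    }

    static void example_work_fn(struct work_struct *work)
    {
            /* Trampoline only: hop from workqueue context into a kthread. */
            kthread_run(example_kthread, NULL, "example");
    }

    static DECLARE_WORK(example_work, example_work_fn);

    /* Callers that cannot spawn a kthread directly just do:
     * schedule_work(&example_work); */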

File tree

2 files changed (+32, -12 lines)


include/linux/timekeeping.h

Lines changed: 2 additions & 2 deletions
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
 extern int persistent_clock_is_local;
 
 extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
-                                           struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+                                          struct timespec64 *boot_offset);
 extern int update_persistent_clock64(struct timespec64 now);
 
 /*
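The change above only renames the declaration so it matches the name of the function timekeeping actually defines. Purely for illustration, a hypothetical implementation of the declared hook could look like the following (not part of this commit; the zero boot offset is just a placeholder):

    /* Hypothetical sketch: report the persistent wall-clock time and the
     * offset between the persistent clock and the boot clock. */
    void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
                                              struct timespec64 *boot_offset)
    {
            read_persistent_clock64(wall_clock);          /* declared just above */
            *boot_offset = (struct timespec64){ 0, 0 };   /* no offset known */
    }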

kernel/time/clocksource.c

Lines changed: 30 additions & 10 deletions
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
         spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+        /*
+         * We cannot directly run clocksource_watchdog_kthread() here, because
+         * clocksource_select() calls timekeeping_notify() which uses
+         * stop_machine(). One cannot use stop_machine() from a workqueue() due
+         * lock inversions wrt CPU hotplug.
+         *
+         * Also, we only ever run this work once or twice during the lifetime
+         * of the kernel, so there is no point in creating a more permanent
+         * kthread for this.
+         *
+         * If kthread_run fails the next watchdog scan over the
+         * watchdog_list will find the unstable clock again.
+         */
+        kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
         cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
         cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
         /*
-         * If the clocksource is registered clocksource_watchdog_work() will
+         * If the clocksource is registered clocksource_watchdog_kthread() will
          * re-rate and re-select.
          */
         if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
         if (cs->mark_unstable)
                 cs->mark_unstable(cs);
 
-        /* kick clocksource_watchdog_work() */
+        /* kick clocksource_watchdog_kthread() */
         if (finished_booting)
                 schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs: clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
         }
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
         struct clocksource *cs, *tmp;
         unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
         return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
         mutex_lock(&clocksource_mutex);
-        if (__clocksource_watchdog_work())
+        if (__clocksource_watchdog_kthread())
                 clocksource_select();
         mutex_unlock(&clocksource_mutex);
+        return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
         /*
          * Run the watchdog first to eliminate unstable clock sources
          */
-        __clocksource_watchdog_work();
+        __clocksource_watchdog_kthread();
         clocksource_select();
         mutex_unlock(&clocksource_mutex);
         return 0;
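Read together, the hunks above restore the following flow for an unstable clocksource (a paraphrase of the diff, not a verbatim excerpt; locking and error handling elided):

    clocksource_mark_unstable(cs)              /* e.g. from the x86 TSC code      */
      -> __clocksource_unstable(cs)            /* flag it, kick the work item     */
           schedule_work(&watchdog_work);      /* only once finished_booting      */
      -> clocksource_watchdog_work()           /* workqueue: trampoline only      */
           kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
      -> clocksource_watchdog_kthread()        /* kthread: stop_machine() is safe */
           mutex_lock(&clocksource_mutex);
           if (__clocksource_watchdog_kthread())
                   clocksource_select();       /* may end up in stop_machine()    */
           mutex_unlock(&clocksource_mutex);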
