Commit 494af3e

KAGA-KOKO authored and Ingo Molnar committed
timers: Give a few structs and members proper names
Some of the names in the internal implementation of the timer code are no
longer correct and others are simply too long to type. Clean it up before we
switch the wheel implementation over to the new scheme.

No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Frederic Weisbecker <[email protected]>
Cc: Arjan van de Ven <[email protected]>
Cc: Chris Mason <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: George Spelvin <[email protected]>
Cc: Josh Triplett <[email protected]>
Cc: Len Brown <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 15dba1e commit 494af3e
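
At a glance, the patch maps struct tvec_base to struct timer_base, the per-CPU
variable tvec_bases to timer_bases, and the base's running clock timer_jiffies
to clk. The following is a minimal sketch of the renamed per-CPU base as it
stands after this patch, reconstructed from the hunks below; only members named
in the diff are listed, and the exact types of cpu, migration_enabled,
nohz_active and the tv1-tv5 wheels are assumptions, not part of this change.

/* Sketch only: abridged, post-patch view of the renamed struct. */
struct timer_base {
	spinlock_t		lock;
	struct timer_list	*running_timer;
	unsigned long		clk;		/* was timer_jiffies */
	unsigned long		next_timer;
	unsigned long		active_timers;
	unsigned long		all_timers;
	int			cpu;		/* type assumed */
	bool			migration_enabled;	/* type assumed */
	bool			nohz_active;		/* type assumed */
	struct tvec_root	tv1;		/* wheel levels, types assumed */
	struct tvec		tv2, tv3, tv4, tv5;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases);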

File tree

1 file changed (+59, -59 lines)

kernel/time/timer.c

Lines changed: 59 additions & 59 deletions
@@ -77,10 +77,10 @@ struct tvec_root {
 	struct hlist_head vec[TVR_SIZE];
 };

-struct tvec_base {
+struct timer_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
-	unsigned long timer_jiffies;
+	unsigned long clk;
 	unsigned long next_timer;
 	unsigned long active_timers;
 	unsigned long all_timers;
@@ -95,7 +95,7 @@ struct tvec_base {
 } ____cacheline_aligned;


-static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases);

 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
@@ -106,15 +106,15 @@ void timers_update_migration(bool update_nohz)
 	unsigned int cpu;

 	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(tvec_bases.migration_enabled) == on)
+	if (this_cpu_read(timer_bases.migration_enabled) == on)
 		return;

 	for_each_possible_cpu(cpu) {
-		per_cpu(tvec_bases.migration_enabled, cpu) = on;
+		per_cpu(timer_bases.migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
 		if (!update_nohz)
 			continue;
-		per_cpu(tvec_bases.nohz_active, cpu) = true;
+		per_cpu(timer_bases.nohz_active, cpu) = true;
 		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
@@ -134,18 +134,18 @@ int timer_migration_handler(struct ctl_table *table, int write,
 	return ret;
 }

-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
 						int pinned)
 {
 	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&tvec_bases);
-	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
+		return this_cpu_ptr(&timer_bases);
+	return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
 }
 #else
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
 						int pinned)
 {
-	return this_cpu_ptr(&tvec_bases);
+	return this_cpu_ptr(&timer_bases);
 }
 #endif

@@ -371,10 +371,10 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 EXPORT_SYMBOL_GPL(set_timer_slack);

 static void
-__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	unsigned long expires = timer->expires;
-	unsigned long idx = expires - base->timer_jiffies;
+	unsigned long idx = expires - base->clk;
 	struct hlist_head *vec;

 	if (idx < TVR_SIZE) {
@@ -394,7 +394,7 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 		 * Can happen if you add a timer with expires == jiffies,
 		 * or you set a timer to go off in the past
 		 */
-		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+		vec = base->tv1.vec + (base->clk & TVR_MASK);
 	} else {
 		int i;
 		/* If the timeout is larger than MAX_TVAL (on 64-bit
@@ -403,7 +403,7 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 		 */
 		if (idx > MAX_TVAL) {
 			idx = MAX_TVAL;
-			expires = idx + base->timer_jiffies;
+			expires = idx + base->clk;
 		}
 		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
 		vec = base->tv5.vec + i;
@@ -412,11 +412,11 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 	hlist_add_head(&timer->entry, vec);
 }

-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	/* Advance base->jiffies, if the base is empty */
 	if (!base->all_timers++)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;

 	__internal_add_timer(base, timer);
 	/*
@@ -707,15 +707,15 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 }

 static inline void
-detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
+detach_expired_timer(struct timer_list *timer, struct timer_base *base)
 {
 	detach_timer(timer, true);
 	if (!(timer->flags & TIMER_DEFERRABLE))
 		base->active_timers--;
 	base->all_timers--;
 }

-static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
 			     bool clear_pending)
 {
 	if (!timer_pending(timer))
@@ -725,16 +725,16 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
 	if (!(timer->flags & TIMER_DEFERRABLE)) {
 		base->active_timers--;
 		if (timer->expires == base->next_timer)
-			base->next_timer = base->timer_jiffies;
+			base->next_timer = base->clk;
 	}
 	/* If this was the last timer, advance base->jiffies */
 	if (!--base->all_timers)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
 	return 1;
 }

 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
+ * We are using hashed locking: holding per_cpu(timer_bases).lock
  * means that all timers which are tied to this base via timer->base are
  * locked, and the base itself is locked too.
  *
@@ -744,16 +744,16 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
  * When the timer's base is locked and removed from the list, the
  * TIMER_MIGRATING flag is set, FIXME
  */
-static struct tvec_base *lock_timer_base(struct timer_list *timer,
+static struct timer_base *lock_timer_base(struct timer_list *timer,
 					unsigned long *flags)
 	__acquires(timer->base->lock)
 {
 	for (;;) {
 		u32 tf = timer->flags;
-		struct tvec_base *base;
+		struct timer_base *base;

 		if (!(tf & TIMER_MIGRATING)) {
-			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+			base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
 			spin_lock_irqsave(&base->lock, *flags);
 			if (timer->flags == tf)
 				return base;
@@ -766,7 +766,7 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
-	struct tvec_base *base, *new_base;
+	struct timer_base *base, *new_base;
 	unsigned long flags;
 	int ret = 0;

@@ -933,8 +933,8 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
-	struct tvec_base *base;
+	struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
+	struct timer_base *base;
 	unsigned long flags;

 	timer_stats_timer_set_start_info(timer);
@@ -975,7 +975,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
  */
 int del_timer(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
 	unsigned long flags;
 	int ret = 0;

@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
 	unsigned long flags;
 	int ret = -1;

@@ -1085,7 +1085,7 @@ int del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer_sync);
 #endif

-static int cascade(struct tvec_base *base, struct tvec *tv, int index)
+static int cascade(struct timer_base *base, struct tvec *tv, int index)
 {
 	/* cascade all the timers from tv up one level */
 	struct timer_list *timer;
@@ -1149,7 +1149,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
 	}
 }

-#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
+#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

 /**
  * __run_timers - run all expired timers (if any) on this CPU.
@@ -1158,23 +1158,23 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
  * This function cascades all vectors and executes all expired timer
  * vectors.
  */
-static inline void __run_timers(struct tvec_base *base)
+static inline void __run_timers(struct timer_base *base)
 {
 	struct timer_list *timer;

 	spin_lock_irq(&base->lock);

-	while (time_after_eq(jiffies, base->timer_jiffies)) {
+	while (time_after_eq(jiffies, base->clk)) {
 		struct hlist_head work_list;
 		struct hlist_head *head = &work_list;
 		int index;

 		if (!base->all_timers) {
-			base->timer_jiffies = jiffies;
+			base->clk = jiffies;
 			break;
 		}

-		index = base->timer_jiffies & TVR_MASK;
+		index = base->clk & TVR_MASK;

 		/*
 		 * Cascade timers:
@@ -1184,7 +1184,7 @@ static inline void __run_timers(struct tvec_base *base)
 			(!cascade(base, &base->tv3, INDEX(1))) &&
 				!cascade(base, &base->tv4, INDEX(2)))
 			cascade(base, &base->tv5, INDEX(3));
-		++base->timer_jiffies;
+		++base->clk;
 		hlist_move_list(base->tv1.vec + index, head);
 		while (!hlist_empty(head)) {
 			void (*fn)(unsigned long);
@@ -1222,16 +1222,16 @@ static inline void __run_timers(struct tvec_base *base)
  * is used on S/390 to stop all activity when a CPU is idle.
  * This function needs to be called with interrupts disabled.
  */
-static unsigned long __next_timer_interrupt(struct tvec_base *base)
+static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
-	unsigned long timer_jiffies = base->timer_jiffies;
-	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
+	unsigned long clk = base->clk;
+	unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
 	int index, slot, array, found = 0;
 	struct timer_list *nte;
 	struct tvec *varray[4];

 	/* Look for timer events in tv1. */
-	index = slot = timer_jiffies & TVR_MASK;
+	index = slot = clk & TVR_MASK;
 	do {
 		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
 			if (nte->flags & TIMER_DEFERRABLE)
@@ -1250,8 +1250,8 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
 cascade:
 	/* Calculate the next cascade event */
 	if (index)
-		timer_jiffies += TVR_SIZE - index;
-	timer_jiffies >>= TVR_BITS;
+		clk += TVR_SIZE - index;
+	clk >>= TVR_BITS;

 	/* Check tv2-tv5. */
 	varray[0] = &base->tv2;
@@ -1262,7 +1262,7 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
 	for (array = 0; array < 4; array++) {
 		struct tvec *varp = varray[array];

-		index = slot = timer_jiffies & TVN_MASK;
+		index = slot = clk & TVN_MASK;
 		do {
 			hlist_for_each_entry(nte, varp->vec + slot, entry) {
 				if (nte->flags & TIMER_DEFERRABLE)
@@ -1286,8 +1286,8 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
 		} while (slot != index);

 		if (index)
-			timer_jiffies += TVN_SIZE - index;
-		timer_jiffies >>= TVN_BITS;
+			clk += TVN_SIZE - index;
+		clk >>= TVN_BITS;
 	}
 	return expires;
 }
@@ -1335,7 +1335,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
 	u64 expires = KTIME_MAX;
 	unsigned long nextevt;

@@ -1348,7 +1348,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)

 	spin_lock(&base->lock);
 	if (base->active_timers) {
-		if (time_before_eq(base->next_timer, base->timer_jiffies))
+		if (time_before_eq(base->next_timer, base->clk))
 			base->next_timer = __next_timer_interrupt(base);
 		nextevt = base->next_timer;
 		if (time_before_eq(nextevt, basej))
@@ -1387,9 +1387,9 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);

-	if (time_after_eq(jiffies, base->timer_jiffies))
+	if (time_after_eq(jiffies, base->clk))
 		__run_timers(base);
 }

@@ -1534,7 +1534,7 @@ signed long __sched schedule_timeout_idle(signed long timeout)
 EXPORT_SYMBOL(schedule_timeout_idle);

 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
 {
 	struct timer_list *timer;
 	int cpu = new_base->cpu;
@@ -1550,13 +1550,13 @@ static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)

 static void migrate_timers(int cpu)
 {
-	struct tvec_base *old_base;
-	struct tvec_base *new_base;
+	struct timer_base *old_base;
+	struct timer_base *new_base;
 	int i;

 	BUG_ON(cpu_online(cpu));
-	old_base = per_cpu_ptr(&tvec_bases, cpu);
-	new_base = get_cpu_ptr(&tvec_bases);
+	old_base = per_cpu_ptr(&timer_bases, cpu);
+	new_base = get_cpu_ptr(&timer_bases);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
@@ -1580,7 +1580,7 @@ static void migrate_timers(int cpu)

 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
-	put_cpu_ptr(&tvec_bases);
+	put_cpu_ptr(&timer_bases);
 }

 static int timer_cpu_notify(struct notifier_block *self,
@@ -1608,13 +1608,13 @@ static inline void timer_register_cpu_notifier(void) { }

 static void __init init_timer_cpu(int cpu)
 {
-	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
+	struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);

 	base->cpu = cpu;
 	spin_lock_init(&base->lock);

-	base->timer_jiffies = jiffies;
-	base->next_timer = base->timer_jiffies;
+	base->clk = jiffies;
+	base->next_timer = base->clk;
 }

 static void __init init_timer_cpus(void)
