@@ -77,10 +77,10 @@ struct tvec_root {
 	struct hlist_head vec[TVR_SIZE];
 };
 
-struct tvec_base {
+struct timer_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
-	unsigned long timer_jiffies;
+	unsigned long clk;
 	unsigned long next_timer;
 	unsigned long active_timers;
 	unsigned long all_timers;
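A note on the rename: clk (formerly timer_jiffies) is the wheel's own clock, the last jiffies value up to which this base's slots have been collected. It lags jiffies on a quiet CPU and is simply fast-forwarded when nothing is queued, as the internal_add_timer() and __run_timers() hunks further down show. A toy userspace model of just that semantics; the struct and helper are illustrative, not kernel code:

struct toy_base {
	unsigned long clk;		/* was timer_jiffies: wheel's local time */
	unsigned long all_timers;	/* queued timers; 0 permits fast-forward */
};

static void toy_advance(struct toy_base *b, unsigned long now)
{
	if (!b->all_timers) {		/* empty wheel: just catch up */
		b->clk = now;
		return;
	}
	while (b->clk <= now)		/* otherwise walk tick by tick, */
		b->clk++;		/* expiring slot (clk & TVR_MASK) */
}

The real loop uses the wrap-safe time_after_eq() rather than a plain <= (see the note near run_timer_softirq() below).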
@@ -95,7 +95,7 @@ struct tvec_base {
 } ____cacheline_aligned;
 
 
-static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
@@ -106,15 +106,15 @@ void timers_update_migration(bool update_nohz)
 	unsigned int cpu;
 
 	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(tvec_bases.migration_enabled) == on)
+	if (this_cpu_read(timer_bases.migration_enabled) == on)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		per_cpu(tvec_bases.migration_enabled, cpu) = on;
+		per_cpu(timer_bases.migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
 		if (!update_nohz)
 			continue;
-		per_cpu(tvec_bases.nohz_active, cpu) = true;
+		per_cpu(timer_bases.nohz_active, cpu) = true;
 		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
@@ -134,18 +134,18 @@ int timer_migration_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
						int pinned)
 {
 	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&tvec_bases);
-	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
+		return this_cpu_ptr(&timer_bases);
+	return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
 }
 #else
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
						int pinned)
 {
-	return this_cpu_ptr(&tvec_bases);
+	return this_cpu_ptr(&timer_bases);
 }
 #endif
 
@@ -371,10 +371,10 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
 static void
-__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	unsigned long expires = timer->expires;
-	unsigned long idx = expires - base->timer_jiffies;
+	unsigned long idx = expires - base->clk;
 	struct hlist_head *vec;
 
 	if (idx < TVR_SIZE) {
@@ -394,7 +394,7 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
-		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+		vec = base->tv1.vec + (base->clk & TVR_MASK);
 	} else {
 		int i;
 		/* If the timeout is larger than MAX_TVAL (on 64-bit
@@ -403,7 +403,7 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
-			expires = idx + base->timer_jiffies;
+			expires = idx + base->clk;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
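The hunks above only touch the name of the base clock, but the arithmetic they preserve is the heart of the cascading wheel: idx = expires - base->clk selects one of five levels of decreasing resolution. A standalone sketch of that slot calculation, assuming the classic !CONFIG_BASE_SMALL geometry (TVR_BITS=8, TVN_BITS=6); the helper and example values are illustrative, not the kernel's code:

#include <stdio.h>

#define TVR_BITS 8
#define TVN_BITS 6
#define TVR_SIZE (1UL << TVR_BITS)	/* 256 slots in tv1 */
#define TVN_SIZE (1UL << TVN_BITS)	/* 64 slots in tv2..tv5 */
#define TVR_MASK (TVR_SIZE - 1)
#define TVN_MASK (TVN_SIZE - 1)

/* Which level (tv1..tv5) and slot would a timer land in? */
static void pick_slot(unsigned long clk, unsigned long expires)
{
	unsigned long idx = expires - clk;

	if (idx < TVR_SIZE)
		printf("tv1 slot %lu\n", expires & TVR_MASK);
	else if (idx < 1UL << (TVR_BITS + TVN_BITS))
		printf("tv2 slot %lu\n", (expires >> TVR_BITS) & TVN_MASK);
	else if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS))
		printf("tv3 slot %lu\n", (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK);
	else if (idx < 1UL << (TVR_BITS + 3 * TVN_BITS))
		printf("tv4 slot %lu\n", (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK);
	else
		printf("tv5 slot %lu\n", (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK);
}

int main(void)
{
	pick_slot(1000, 1010);	/* delta 10    -> tv1 */
	pick_slot(1000, 2000);	/* delta 1000  -> tv2 */
	pick_slot(1000, 70000);	/* delta 69000 -> tv3 */
	return 0;
}

Because idx is unsigned, an already-expired timer (expires before clk) wraps to a huge idx and is handled by the explicit idx < 0 check in the real code, landing in the current tv1 slot.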
@@ -412,11 +412,11 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
	hlist_add_head(&timer->entry, vec);
 }
 
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
	/* Advance base->jiffies, if the base is empty */
	if (!base->all_timers++)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
 
	__internal_add_timer(base, timer);
	/*
@@ -707,15 +707,15 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 }
 
 static inline void
-detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
+detach_expired_timer(struct timer_list *timer, struct timer_base *base)
 {
	detach_timer(timer, true);
	if (!(timer->flags & TIMER_DEFERRABLE))
		base->active_timers--;
	base->all_timers--;
 }
 
-static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
 {
	if (!timer_pending(timer))
@@ -725,16 +725,16 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
	if (!(timer->flags & TIMER_DEFERRABLE)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
-			base->next_timer = base->timer_jiffies;
+			base->next_timer = base->clk;
	}
	/* If this was the last timer, advance base->jiffies */
	if (!--base->all_timers)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
	return 1;
 }
 
 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
+ * We are using hashed locking: holding per_cpu(timer_bases).lock
  * means that all timers which are tied to this base via timer->base are
  * locked, and the base itself is locked too.
  *
@@ -744,16 +744,16 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
  * When the timer's base is locked and removed from the list, the
  * TIMER_MIGRATING flag is set, FIXME
  */
-static struct tvec_base *lock_timer_base(struct timer_list *timer,
+static struct timer_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
 {
	for (;;) {
		u32 tf = timer->flags;
-		struct tvec_base *base;
+		struct timer_base *base;
 
		if (!(tf & TIMER_MIGRATING)) {
-			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+			base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
			spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
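The hashed-locking scheme above works because the low bits of timer->flags name the owning CPU: read the flags, lock the base they point at, then re-check that the flags did not change while we slept on the lock. A userspace model of that lock-and-revalidate loop; the flag values follow include/linux/timer.h of this era, everything else is an illustrative stand-in:

#include <pthread.h>
#include <stdint.h>

#define TIMER_CPUMASK	0x0003FFFFu	/* low bits: owning CPU */
#define TIMER_MIGRATING	0x00040000u	/* base is being switched */
#define NR_CPUS		4

struct base  { pthread_mutex_t lock; };
struct timer { _Atomic uint32_t flags; };

static struct base bases[NR_CPUS];

static struct base *lock_base(struct timer *t)
{
	for (;;) {
		uint32_t tf = t->flags;

		if (!(tf & TIMER_MIGRATING)) {
			struct base *b = &bases[tf & TIMER_CPUMASK];

			pthread_mutex_lock(&b->lock);
			if (t->flags == tf)	/* unchanged: timer is pinned */
				return b;
			pthread_mutex_unlock(&b->lock);
		}
		/* migration in flight: retry until the flags settle */
	}
}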
@@ -766,7 +766,7 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
-	struct tvec_base *base, *new_base;
+	struct timer_base *base, *new_base;
	unsigned long flags;
	int ret = 0;
 
@@ -933,8 +933,8 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
-	struct tvec_base *base;
+	struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
+	struct timer_base *base;
	unsigned long flags;
 
	timer_stats_timer_set_start_info(timer);
@@ -975,7 +975,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
  */
 int del_timer(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
	unsigned long flags;
	int ret = 0;
 
@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
	unsigned long flags;
	int ret = -1;
 
@@ -1085,7 +1085,7 @@ int del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static int cascade(struct tvec_base *base, struct tvec *tv, int index)
+static int cascade(struct timer_base *base, struct tvec *tv, int index)
 {
	/* cascade all the timers from tv up one level */
	struct timer_list *timer;
@@ -1149,7 +1149,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
 	}
 }
 
-#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
+#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
 
 /**
  * __run_timers - run all expired timers (if any) on this CPU.
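INDEX(N) picks the slot of tv(N+2) that the base clock currently points at. Spelled out as plain C, under the same illustrative geometry as the earlier sketch:

#define TVR_BITS 8			/* illustrative, as above */
#define TVN_BITS 6
#define TVN_MASK ((1UL << TVN_BITS) - 1)

/* Slot of tv(N+2) that base->clk currently points at. */
static unsigned long index_of(unsigned long clk, int n)
{
	return (clk >> (TVR_BITS + n * TVN_BITS)) & TVN_MASK;
}

It only matters on ticks where a lower level wrapped: in __run_timers() below, cascade() returns the slot it just emptied, so the !cascade(...) && chain walks up one level exactly when everything beneath it has wrapped to slot 0.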
@@ -1158,23 +1158,23 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
-static inline void __run_timers(struct tvec_base *base)
+static inline void __run_timers(struct timer_base *base)
 {
	struct timer_list *timer;
 
	spin_lock_irq(&base->lock);
 
-	while (time_after_eq(jiffies, base->timer_jiffies)) {
+	while (time_after_eq(jiffies, base->clk)) {
		struct hlist_head work_list;
		struct hlist_head *head = &work_list;
		int index;
 
		if (!base->all_timers) {
-			base->timer_jiffies = jiffies;
+			base->clk = jiffies;
			break;
		}
 
-		index = base->timer_jiffies & TVR_MASK;
+		index = base->clk & TVR_MASK;
 
		/*
		 * Cascade timers:
@@ -1184,7 +1184,7 @@ static inline void __run_timers(struct tvec_base *base)
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
-		++base->timer_jiffies;
+		++base->clk;
		hlist_move_list(base->tv1.vec + index, head);
		while (!hlist_empty(head)) {
			void (*fn)(unsigned long);
@@ -1222,16 +1222,16 @@ static inline void __run_timers(struct tvec_base *base)
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
-static unsigned long __next_timer_interrupt(struct tvec_base *base)
+static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
-	unsigned long timer_jiffies = base->timer_jiffies;
-	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
+	unsigned long clk = base->clk;
+	unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];
 
	/* Look for timer events in tv1. */
-	index = slot = timer_jiffies & TVR_MASK;
+	index = slot = clk & TVR_MASK;
	do {
		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (nte->flags & TIMER_DEFERRABLE)
@@ -1250,8 +1250,8 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
 cascade:
	/* Calculate the next cascade event */
	if (index)
-		timer_jiffies += TVR_SIZE - index;
-	timer_jiffies >>= TVR_BITS;
+		clk += TVR_SIZE - index;
+	clk >>= TVR_BITS;
 
	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
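The round-up-then-shift in this hunk is what keeps one indexing scheme valid at every level: after scanning the current level from slot index onward, clk is advanced to the next wrap of that level and shifted so that one clk unit equals one slot of the level above. As a self-contained helper, with the illustrative constants from earlier:

/* After scanning a level from slot "index": step clk up one level. */
static unsigned long to_next_level(unsigned long clk, unsigned long index,
				   int bits, unsigned long size)
{
	if (index)
		clk += size - index;	/* round up: rest of this wrap */
	return clk >> bits;		/* one unit now == one upper slot */
}

The tv1 pass calls this shape with (TVR_BITS, TVR_SIZE); the tv2..tv5 loop below repeats it with (TVN_BITS, TVN_SIZE).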
@@ -1262,7 +1262,7 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];
 
-		index = slot = timer_jiffies & TVN_MASK;
+		index = slot = clk & TVN_MASK;
		do {
			hlist_for_each_entry(nte, varp->vec + slot, entry) {
				if (nte->flags & TIMER_DEFERRABLE)
@@ -1286,8 +1286,8 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
		} while (slot != index);
 
		if (index)
-			timer_jiffies += TVN_SIZE - index;
-		timer_jiffies >>= TVN_BITS;
+			clk += TVN_SIZE - index;
+		clk >>= TVN_BITS;
	}
	return expires;
 }
@@ -1335,7 +1335,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;
 
@@ -1348,7 +1348,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
	spin_lock(&base->lock);
	if (base->active_timers) {
-		if (time_before_eq(base->next_timer, base->timer_jiffies))
+		if (time_before_eq(base->next_timer, base->clk))
			base->next_timer = __next_timer_interrupt(base);
		nextevt = base->next_timer;
		if (time_before_eq(nextevt, basej))
@@ -1387,9 +1387,9 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
 
-	if (time_after_eq(jiffies, base->timer_jiffies))
+	if (time_after_eq(jiffies, base->clk))
		__run_timers(base);
 }
 
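run_timer_softirq() fires on every tick but only takes the base lock when the base clock has actually fallen behind. The comparison is wrap-safe: time_after_eq() is essentially a signed subtraction (mirroring its include/linux/jiffies.h definition, minus the typecheck). A minimal demonstration across the jiffies wrap:

#include <assert.h>

#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long jiffies = 5;	/* wrapped past ULONG_MAX */
	unsigned long clk = -3UL;	/* shortly before the wrap */

	/* 5 - (ULONG_MAX - 2) is 8 as a signed value: jiffies is later. */
	assert(time_after_eq(jiffies, clk));
	return 0;
}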
@@ -1534,7 +1534,7 @@ signed long __sched schedule_timeout_idle(signed long timeout)
 EXPORT_SYMBOL(schedule_timeout_idle);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
 {
	struct timer_list *timer;
	int cpu = new_base->cpu;
@@ -1550,13 +1550,13 @@ static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *he
 
 static void migrate_timers(int cpu)
 {
-	struct tvec_base *old_base;
-	struct tvec_base *new_base;
+	struct timer_base *old_base;
+	struct timer_base *new_base;
	int i;
 
	BUG_ON(cpu_online(cpu));
-	old_base = per_cpu_ptr(&tvec_bases, cpu);
-	new_base = get_cpu_ptr(&tvec_bases);
+	old_base = per_cpu_ptr(&timer_bases, cpu);
+	new_base = get_cpu_ptr(&timer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
@@ -1580,7 +1580,7 @@ static void migrate_timers(int cpu)
 
	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
-	put_cpu_ptr(&tvec_bases);
+	put_cpu_ptr(&timer_bases);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1608,13 +1608,13 @@ static inline void timer_register_cpu_notifier(void) { }
 
 static void __init init_timer_cpu(int cpu)
 {
-	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
+	struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);
 
	base->cpu = cpu;
	spin_lock_init(&base->lock);
 
-	base->timer_jiffies = jiffies;
-	base->next_timer = base->timer_jiffies;
+	base->clk = jiffies;
+	base->next_timer = base->clk;
 }
 
 static void __init init_timer_cpus(void)