@@ -1252,8 +1252,8 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 	}
 }
 
-static int collect_expired_timers(struct timer_base *base,
-				  struct hlist_head *heads)
+static int __collect_expired_timers(struct timer_base *base,
+				    struct hlist_head *heads)
 {
 	unsigned long clk = base->clk;
 	struct hlist_head *vec;
@@ -1279,9 +1279,9 @@ static int collect_expired_timers(struct timer_base *base,
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find the next pending bucket of a level. Search from @offset + @clk upwards
- * and if nothing there, search from start of the level (@offset) up to
- * @offset + clk.
+ * Find the next pending bucket of a level. Search from level start (@offset)
+ * + @clk upwards and if nothing there, search from start of the level
+ * (@offset) up to @offset + clk.
  */
 static int next_pending_bucket(struct timer_base *base, unsigned offset,
 			       unsigned clk)
@@ -1298,14 +1298,14 @@ static int next_pending_bucket(struct timer_base *base, unsigned offset,
 }
 
 /*
- * Search the first expiring timer in the various clock levels.
+ * Search the first expiring timer in the various clock levels. Caller must
+ * hold base->lock.
  */
 static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
 	unsigned long clk, next, adj;
 	unsigned lvl, offset = 0;
 
-	spin_lock(&base->lock);
 	next = base->clk + NEXT_TIMER_MAX_DELTA;
 	clk = base->clk;
 	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
@@ -1358,7 +1358,6 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
 		clk >>= LVL_CLK_SHIFT;
 		clk += adj;
 	}
-	spin_unlock(&base->lock);
 	return next;
 }
 
@@ -1416,14 +1415,48 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
+	spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
+	spin_unlock(&base->lock);
+
 	if (time_before_eq(nextevt, basej))
 		expires = basem;
 	else
 		expires = basem + (nextevt - basej) * TICK_NSEC;
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
+
+static int collect_expired_timers(struct timer_base *base,
+				  struct hlist_head *heads)
+{
+	/*
+	 * NOHZ optimization. After a long idle sleep we need to forward the
+	 * base to current jiffies. Avoid a loop by searching the bitfield for
+	 * the next expiring timer.
+	 */
+	if ((long)(jiffies - base->clk) > 2) {
+		unsigned long next = __next_timer_interrupt(base);
+
+		/*
+		 * If the next timer is ahead of time forward to current
+		 * jiffies, otherwise forward to the next expiry time.
+		 */
+		if (time_after(next, jiffies)) {
+			/* The call site will increment clock! */
+			base->clk = jiffies - 1;
+			return 0;
+		}
+		base->clk = next;
+	}
+	return __collect_expired_timers(base, heads);
+}
+#else
+static inline int collect_expired_timers(struct timer_base *base,
+					 struct hlist_head *heads)
+{
+	return __collect_expired_timers(base, heads);
+}
 #endif
 
 /*
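
The wrap-around search described in the comment above next_pending_bucket() can be modeled in user space. The sketch below is illustrative only: LVL_BITS, the one-word-per-level pending_map and find_next_set() are simplified stand-ins for the kernel's multi-level pending bitmap and find_next_bit(); the return value mirrors the "buckets ahead of @clk" convention the level-search loop needs.

#include <stdio.h>

#define LVL_BITS	6
#define LVL_SIZE	(1U << LVL_BITS)	/* 64 buckets per level */
#define LVL_MASK	(LVL_SIZE - 1)

static unsigned long long pending_map[8];	/* one word per level */

/* Linear-scan stand-in for the kernel's find_next_bit(). */
static int find_next_set(unsigned long long map, unsigned from, unsigned to)
{
	unsigned i;

	for (i = from; i < to; i++)
		if (map & (1ULL << i))
			return (int)i;
	return -1;
}

/*
 * Returns how many buckets ahead of @clk the first pending bucket of
 * level @lvl sits, or -1 if the level is empty: first scan from @clk to
 * the end of the level, then wrap and scan from the level start to @clk.
 */
static int model_next_pending_bucket(unsigned lvl, unsigned clk)
{
	unsigned start = clk & LVL_MASK;
	int pos;

	pos = find_next_set(pending_map[lvl], start, LVL_SIZE);
	if (pos >= 0)
		return pos - (int)start;

	pos = find_next_set(pending_map[lvl], 0, start);
	return pos >= 0 ? pos + (int)LVL_SIZE - (int)start : -1;
}

int main(void)
{
	pending_map[0] = 1ULL << 5;	/* one armed bucket at index 5 */

	/* No wrap: bucket 5 is two buckets ahead of clk 3. */
	printf("clk=3:  +%d buckets\n", model_next_pending_bucket(0, 3));
	/* Wrap: from clk 10 the scan runs off the end and restarts. */
	printf("clk=10: +%d buckets\n", model_next_pending_bucket(0, 10));
	return 0;
}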
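The forwarding logic added to collect_expired_timers() is subtle in two spots: the signed cast makes the "more than two ticks behind" test safe across jiffies wraparound, and base->clk is deliberately set to jiffies - 1 when nothing is due, because the call site increments the clock before scanning. A minimal user-space sketch of just that decision follows; forward_clk(), next_expiry and the fixed jiffies values are hypothetical stand-ins, not kernel code.

#include <stdio.h>

static unsigned long jiffies;		/* stand-in for the global tick count */
static unsigned long next_expiry;	/* stand-in for __next_timer_interrupt() */

struct timer_base {
	unsigned long clk;
};

static void forward_clk(struct timer_base *base)
{
	/* Signed difference keeps the test correct across wraparound. */
	if ((long)(jiffies - base->clk) > 2) {
		unsigned long next = next_expiry;

		/* time_after(next, jiffies): nothing due yet, so park the
		 * clock one tick short; the caller increments it. */
		if ((long)(next - jiffies) > 0) {
			base->clk = jiffies - 1;
			return;
		}
		/* Otherwise jump straight to the first expiry. */
		base->clk = next;
	}
}

int main(void)
{
	struct timer_base base = { .clk = 1000 };

	jiffies = 1500;
	next_expiry = 2000;	/* earliest timer still in the future */
	forward_clk(&base);
	printf("idle case:    clk=%lu (jiffies=%lu)\n", base.clk, jiffies);

	base.clk = 1000;
	next_expiry = 1200;	/* a timer already expired while idle */
	forward_clk(&base);
	printf("expired case: clk=%lu\n", base.clk);
	return 0;
}

Either way the base clock jumps in one step instead of being ticked forward a jiffy at a time, which is the point of the "avoid a loop" comment in the patch.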