@@ -165,17 +165,17 @@ static struct psi_group psi_system = {
 	.pcpu = &system_group_pcpu,
 };
 
-static void psi_update_work(struct work_struct *work);
+static void psi_avgs_work(struct work_struct *work);
 
 static void group_init(struct psi_group *group)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
-	group->next_update = sched_clock() + psi_period;
-	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
-	mutex_init(&group->stat_lock);
+	group->avg_next_update = sched_clock() + psi_period;
+	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+	mutex_init(&group->avgs_lock);
 }
 
 void __init psi_init(void)
@@ -278,7 +278,7 @@ static bool update_stats(struct psi_group *group)
 	int cpu;
 	int s;
 
-	mutex_lock(&group->stat_lock);
+	mutex_lock(&group->avgs_lock);
 
 	/*
 	 * Collect the per-cpu time buckets and average them into a
@@ -319,7 +319,7 @@ static bool update_stats(struct psi_group *group)
 
 	/* avgX= */
 	now = sched_clock();
-	expires = group->next_update;
+	expires = group->avg_next_update;
 	if (now < expires)
 		goto out;
 	if (now - expires >= psi_period)
@@ -332,14 +332,14 @@ static bool update_stats(struct psi_group *group)
332
332
* But the deltas we sample out of the per-cpu buckets above
333
333
* are based on the actual time elapsing between clock ticks.
334
334
*/
335
- group -> next_update = expires + ((1 + missed_periods ) * psi_period );
336
- period = now - (group -> last_update + (missed_periods * psi_period ));
337
- group -> last_update = now ;
335
+ group -> avg_next_update = expires + ((1 + missed_periods ) * psi_period );
336
+ period = now - (group -> avg_last_update + (missed_periods * psi_period ));
337
+ group -> avg_last_update = now ;
338
338
339
339
for (s = 0 ; s < NR_PSI_STATES - 1 ; s ++ ) {
340
340
u32 sample ;
341
341
342
- sample = group -> total [s ] - group -> total_prev [s ];
342
+ sample = group -> total [s ] - group -> avg_total [s ];
343
343
/*
344
344
* Due to the lockless sampling of the time buckets,
345
345
* recorded time deltas can slip into the next period,
@@ -359,22 +359,22 @@ static bool update_stats(struct psi_group *group)
359
359
*/
360
360
if (sample > period )
361
361
sample = period ;
362
- group -> total_prev [s ] += sample ;
362
+ group -> avg_total [s ] += sample ;
363
363
calc_avgs (group -> avg [s ], missed_periods , sample , period );
364
364
}
365
365
out :
366
- mutex_unlock (& group -> stat_lock );
366
+ mutex_unlock (& group -> avgs_lock );
367
367
return nonidle_total ;
368
368
}
369
369
370
- static void psi_update_work (struct work_struct * work )
370
+ static void psi_avgs_work (struct work_struct * work )
371
371
{
372
372
struct delayed_work * dwork ;
373
373
struct psi_group * group ;
374
374
bool nonidle ;
375
375
376
376
dwork = to_delayed_work (work );
377
- group = container_of (dwork , struct psi_group , clock_work );
377
+ group = container_of (dwork , struct psi_group , avgs_work );
378
378
379
379
/*
380
380
* If there is task activity, periodically fold the per-cpu
@@ -391,8 +391,9 @@ static void psi_update_work(struct work_struct *work)
391
391
u64 now ;
392
392
393
393
now = sched_clock ();
394
- if (group -> next_update > now )
395
- delay = nsecs_to_jiffies (group -> next_update - now ) + 1 ;
394
+ if (group -> avg_next_update > now )
395
+ delay = nsecs_to_jiffies (
396
+ group -> avg_next_update - now ) + 1 ;
396
397
schedule_delayed_work (dwork , delay );
397
398
}
398
399
}
@@ -546,13 +547,13 @@ void psi_task_change(struct task_struct *task, int clear, int set)
546
547
*/
547
548
if (unlikely ((clear & TSK_RUNNING ) &&
548
549
(task -> flags & PF_WQ_WORKER ) &&
549
- wq_worker_last_func (task ) == psi_update_work ))
550
+ wq_worker_last_func (task ) == psi_avgs_work ))
550
551
wake_clock = false;
551
552
552
553
while ((group = iterate_groups (task , & iter ))) {
553
554
psi_group_change (group , cpu , clear , set );
554
- if (wake_clock && !delayed_work_pending (& group -> clock_work ))
555
- schedule_delayed_work (& group -> clock_work , PSI_FREQ );
555
+ if (wake_clock && !delayed_work_pending (& group -> avgs_work ))
556
+ schedule_delayed_work (& group -> avgs_work , PSI_FREQ );
556
557
}
557
558
}
558
559
@@ -649,7 +650,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
 	if (static_branch_likely(&psi_disabled))
 		return;
 
-	cancel_delayed_work_sync(&cgroup->psi.clock_work);
+	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
 	free_percpu(cgroup->psi.pcpu);
 }
 
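The arithmetic touched in the -332,14 hunk above (avg_next_update, avg_last_update, missed_periods) is ordinary fixed-period bookkeeping and can be tried outside the kernel. The following userspace C sketch mirrors those few lines under assumed inputs: the 2-second psi_period, the zeroed start state, and the 7-second "now" timestamp are illustrative values, not anything taken from this patch.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Assumed averaging period for illustration; the kernel defines its own psi_period. */
static const uint64_t psi_period = 2 * NSEC_PER_SEC;

int main(void)
{
	uint64_t avg_last_update = 0;		/* last time averages were folded */
	uint64_t avg_next_update = psi_period;	/* next scheduled fold */
	uint64_t now = 7 * NSEC_PER_SEC;	/* pretend the worker ran late */
	uint64_t expires = avg_next_update;
	uint64_t missed_periods = 0;
	uint64_t period;

	/*
	 * Same steps as the hunk: count skipped periods, advance the
	 * schedule by whole periods, and measure the real elapsed window.
	 */
	if (now - expires >= psi_period)
		missed_periods = (now - expires) / psi_period;
	avg_next_update = expires + (1 + missed_periods) * psi_period;
	period = now - (avg_last_update + missed_periods * psi_period);
	avg_last_update = now;

	printf("missed_periods=%llu next_update=%llus period=%llus\n",
	       (unsigned long long)missed_periods,
	       (unsigned long long)(avg_next_update / NSEC_PER_SEC),
	       (unsigned long long)(period / NSEC_PER_SEC));
	return 0;
}

With those inputs the sketch reports two missed periods, a next update at the 8-second mark, and a 3-second sampling window, showing how avg_next_update stays on a fixed grid while period reflects the time that actually elapsed.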