
Commit bcc78db

surenbaghdasaryan authored and torvalds committed
psi: rename psi fields in preparation for psi trigger addition
Rename the psi_group structure member fields used for calculating psi totals and averages, to clearly distinguish them from the trigger-related fields that will be added by "psi: introduce psi monitor".

[[email protected]: v6]
Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Suren Baghdasaryan <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Dennis Zhou <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Li Zefan <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 9289c5e commit bcc78db
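
For quick reference, a condensed sketch of the rename, derived only from the diff below; psi_group members not touched by this commit are elided, so this is not the full structure definition:

	/* Aggregation state in struct psi_group after this commit (old names in comments) */
	struct psi_group {
		/* Protects data used by the aggregator */
		struct mutex avgs_lock;			/* was: stat_lock */

		/* Per-cpu task state & time tracking */
		struct psi_group_cpu __percpu *pcpu;	/* unchanged */

		/* Running pressure averages */
		u64 avg_total[NR_PSI_STATES - 1];	/* was: total_prev[] */
		u64 avg_last_update;			/* was: last_update */
		u64 avg_next_update;			/* was: next_update */
		struct delayed_work avgs_work;		/* was: clock_work; handler renamed
							   psi_update_work -> psi_avgs_work */

		/* Total stall times and sampled pressure averages */
		u64 total[NR_PSI_STATES - 1];		/* unchanged */
		/* ... remaining members unchanged ... */
	};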

2 files changed (+28, -27 lines)

include/linux/psi_types.h

Lines changed: 7 additions & 7 deletions
@@ -69,17 +69,17 @@ struct psi_group_cpu {
 };
 
 struct psi_group {
-	/* Protects data updated during an aggregation */
-	struct mutex stat_lock;
+	/* Protects data used by the aggregator */
+	struct mutex avgs_lock;
 
 	/* Per-cpu task state & time tracking */
 	struct psi_group_cpu __percpu *pcpu;
 
-	/* Periodic aggregation state */
-	u64 total_prev[NR_PSI_STATES - 1];
-	u64 last_update;
-	u64 next_update;
-	struct delayed_work clock_work;
+	/* Running pressure averages */
+	u64 avg_total[NR_PSI_STATES - 1];
+	u64 avg_last_update;
+	u64 avg_next_update;
+	struct delayed_work avgs_work;
 
 	/* Total stall times and sampled pressure averages */
 	u64 total[NR_PSI_STATES - 1];

kernel/sched/psi.c

Lines changed: 21 additions & 20 deletions
@@ -165,17 +165,17 @@ static struct psi_group psi_system = {
 	.pcpu = &system_group_pcpu,
 };
 
-static void psi_update_work(struct work_struct *work);
+static void psi_avgs_work(struct work_struct *work);
 
 static void group_init(struct psi_group *group)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
-	group->next_update = sched_clock() + psi_period;
-	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
-	mutex_init(&group->stat_lock);
+	group->avg_next_update = sched_clock() + psi_period;
+	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+	mutex_init(&group->avgs_lock);
 }
 
 void __init psi_init(void)
@@ -278,7 +278,7 @@ static bool update_stats(struct psi_group *group)
 	int cpu;
 	int s;
 
-	mutex_lock(&group->stat_lock);
+	mutex_lock(&group->avgs_lock);
 
 	/*
 	 * Collect the per-cpu time buckets and average them into a
@@ -319,7 +319,7 @@ static bool update_stats(struct psi_group *group)
 
 	/* avgX= */
 	now = sched_clock();
-	expires = group->next_update;
+	expires = group->avg_next_update;
 	if (now < expires)
 		goto out;
 	if (now - expires >= psi_period)
@@ -332,14 +332,14 @@ static bool update_stats(struct psi_group *group)
 	 * But the deltas we sample out of the per-cpu buckets above
 	 * are based on the actual time elapsing between clock ticks.
 	 */
-	group->next_update = expires + ((1 + missed_periods) * psi_period);
-	period = now - (group->last_update + (missed_periods * psi_period));
-	group->last_update = now;
+	group->avg_next_update = expires + ((1 + missed_periods) * psi_period);
+	period = now - (group->avg_last_update + (missed_periods * psi_period));
+	group->avg_last_update = now;
 
 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
 		u32 sample;
 
-		sample = group->total[s] - group->total_prev[s];
+		sample = group->total[s] - group->avg_total[s];
 		/*
 		 * Due to the lockless sampling of the time buckets,
 		 * recorded time deltas can slip into the next period,
@@ -359,22 +359,22 @@ static bool update_stats(struct psi_group *group)
 		 */
 		if (sample > period)
 			sample = period;
-		group->total_prev[s] += sample;
+		group->avg_total[s] += sample;
 		calc_avgs(group->avg[s], missed_periods, sample, period);
 	}
 out:
-	mutex_unlock(&group->stat_lock);
+	mutex_unlock(&group->avgs_lock);
 	return nonidle_total;
 }
 
-static void psi_update_work(struct work_struct *work)
+static void psi_avgs_work(struct work_struct *work)
 {
 	struct delayed_work *dwork;
 	struct psi_group *group;
 	bool nonidle;
 
 	dwork = to_delayed_work(work);
-	group = container_of(dwork, struct psi_group, clock_work);
+	group = container_of(dwork, struct psi_group, avgs_work);
 
 	/*
 	 * If there is task activity, periodically fold the per-cpu
@@ -391,8 +391,9 @@ static void psi_update_work(struct work_struct *work)
 		u64 now;
 
 		now = sched_clock();
-		if (group->next_update > now)
-			delay = nsecs_to_jiffies(group->next_update - now) + 1;
+		if (group->avg_next_update > now)
+			delay = nsecs_to_jiffies(
+					group->avg_next_update - now) + 1;
 		schedule_delayed_work(dwork, delay);
 	}
 }
@@ -546,13 +547,13 @@ void psi_task_change(struct task_struct *task, int clear, int set)
 	 */
 	if (unlikely((clear & TSK_RUNNING) &&
 		     (task->flags & PF_WQ_WORKER) &&
-		     wq_worker_last_func(task) == psi_update_work))
+		     wq_worker_last_func(task) == psi_avgs_work))
 		wake_clock = false;
 
 	while ((group = iterate_groups(task, &iter))) {
 		psi_group_change(group, cpu, clear, set);
-		if (wake_clock && !delayed_work_pending(&group->clock_work))
-			schedule_delayed_work(&group->clock_work, PSI_FREQ);
+		if (wake_clock && !delayed_work_pending(&group->avgs_work))
+			schedule_delayed_work(&group->avgs_work, PSI_FREQ);
 	}
 }
 
@@ -649,7 +650,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
 	if (static_branch_likely(&psi_disabled))
 		return;
 
-	cancel_delayed_work_sync(&cgroup->psi.clock_work);
+	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
 	free_percpu(cgroup->psi.pcpu);
 }
 