
Commit d4edd66

Juri Lelli and Claudio Scordino
authored and committed
sched/cpufreq: Use the DEADLINE utilization signal
SCHED_DEADLINE tracks active utilization signal with a per dl_rq
variable named running_bw.

Make use of that to drive CPU frequency selection: add up FAIR and
DEADLINE contribution to get the required CPU capacity to handle both
requirements (while RT still selects max frequency).

Co-authored-by: Claudio Scordino <[email protected]>
Signed-off-by: Juri Lelli <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Viresh Kumar <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Luca Abeni <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rafael J. Wysocki <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
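In short, the patched schedutil treats DEADLINE bandwidth as real utilization rather than as a reason to jump to max frequency: the CFS and DEADLINE signals add up (clamped to CPU capacity) and only RT requests still force the maximum. A minimal sketch of the resulting selection policy, assuming frequency invariance and schedutil's usual ~25% headroom; the function name and signature are illustrative, not the patched kernel functions:

#include <stdbool.h>

#define SCHED_CAPACITY_SCALE	1024	/* capacity of one CPU at max freq */

/*
 * Illustrative sketch of the post-patch policy: an RT request pins the
 * maximum frequency; otherwise the request covers util_cfs + util_dl,
 * clamped to the CPU's capacity.
 */
static unsigned long pick_freq(bool rt_request, unsigned long util_cfs,
			       unsigned long util_dl, unsigned long max_freq)
{
	unsigned long max = SCHED_CAPACITY_SCALE;
	unsigned long util = util_cfs + util_dl;

	if (rt_request)			/* SCHED_CPUFREQ_RT still -> max */
		return max_freq;

	if (util > max)			/* the min(util_cfs + util_dl, *max) clamp */
		util = max;

	/* get_next_freq()-style ~25% headroom: 1.25 * max_freq * util / max */
	return (max_freq + (max_freq >> 2)) * util / max;
}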
1 parent 34be393 · commit d4edd66

File tree

3 files changed: +25 -12 lines changed


include/linux/sched/cpufreq.h

Lines changed: 0 additions & 2 deletions
@@ -12,8 +12,6 @@
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
 
-#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
-
 #ifdef CONFIG_CPU_FREQ
 struct update_util_data {
 	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);

kernel/sched/cpufreq_schedutil.c

Lines changed: 15 additions & 10 deletions
@@ -179,12 +179,17 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long cfs_max;
+	unsigned long util_cfs = cpu_util_cfs(rq);
+	unsigned long util_dl = cpu_util_dl(rq);
 
-	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
+	*max = arch_scale_cpu_capacity(NULL, cpu);
 
-	*util = min(rq->cfs.avg.util_avg, cfs_max);
-	*max = cfs_max;
+	/*
+	 * Ideally we would like to set util_dl as min/guaranteed freq and
+	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
+	 * ready for such an interface. So, we only do the latter for now.
+	 */
+	*util = min(util_cfs + util_dl, *max);
 }
 
 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
@@ -271,7 +276,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	busy = sugov_cpu_is_busy(sg_cpu);
 
-	if (flags & SCHED_CPUFREQ_RT_DL) {
+	if (flags & SCHED_CPUFREQ_RT) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
@@ -316,7 +321,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
-		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
+		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
 			return policy->cpuinfo.max_freq;
 
 		j_util = j_sg_cpu->util;
@@ -352,7 +357,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	sg_cpu->last_update = time;
 
 	if (sugov_should_update_freq(sg_policy, time)) {
-		if (flags & SCHED_CPUFREQ_RT_DL)
+		if (flags & SCHED_CPUFREQ_RT)
 			next_f = sg_policy->policy->cpuinfo.max_freq;
 		else
 			next_f = sugov_next_freq_shared(sg_cpu, time);
@@ -382,9 +387,9 @@ static void sugov_irq_work(struct irq_work *irq_work)
 	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
 
 	/*
-	 * For RT and deadline tasks, the schedutil governor shoots the
-	 * frequency to maximum. Special care must be taken to ensure that this
-	 * kthread doesn't result in the same behavior.
+	 * For RT tasks, the schedutil governor shoots the frequency to maximum.
+	 * Special care must be taken to ensure that this kthread doesn't result
+	 * in the same behavior.
 	 *
 	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
 	 * updated only at the end of the sugov_work() function and before that
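To make the new clamp concrete, here is a hypothetical worked example (the numbers are illustrative, not from the patch): on a CPU with capacity 1024, CFS utilization 300, and DEADLINE utilization 102, sugov_get_util() now reports util = min(300 + 102, 1024) = 402, so the governor sizes the frequency for both classes together instead of for CFS alone:

#include <stdio.h>

/* Illustrative numbers only: capacity 1024, CFS util 300, DL util 102. */
int main(void)
{
	unsigned long max = 1024, util_cfs = 300, util_dl = 102;
	unsigned long max_freq = 2000;	/* MHz, hypothetical */
	unsigned long util = util_cfs + util_dl;

	if (util > max)		/* the min(util_cfs + util_dl, *max) clamp */
		util = max;

	/* With get_next_freq()-style 25% headroom this requests ~981 MHz. */
	printf("util = %lu/%lu -> request %lu MHz\n",
	       util, max, (max_freq + (max_freq >> 2)) * util / max);
	return 0;
}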

kernel/sched/sched.h

Lines changed: 10 additions & 0 deletions
@@ -2084,3 +2084,13 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #else /* arch_scale_freq_capacity */
 #define arch_scale_freq_invariant()	(false)
 #endif
+
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
+}
+
+static inline unsigned long cpu_util_cfs(struct rq *rq)
+{
+	return rq->cfs.avg.util_avg;
+}
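cpu_util_dl() converts running_bw, a fixed-point fraction with BW_SHIFT (20) fractional bits, into the 0..SCHED_CAPACITY_SCALE (1024) range that schedutil works in. A standalone sketch of that conversion for a hypothetical deadline task with 10 ms runtime every 100 ms; the two constants match the kernel's values, everything else is illustrative:

#include <stdio.h>

#define BW_SHIFT		20	/* fractional bits of running_bw */
#define SCHED_CAPACITY_SCALE	1024	/* full capacity of one CPU */

int main(void)
{
	/*
	 * A DL task with runtime 10 ms per 100 ms period admits bandwidth
	 * 10/100, stored fixed-point as (runtime << BW_SHIFT) / period.
	 */
	unsigned long long running_bw = (10ULL << BW_SHIFT) / 100;

	/* cpu_util_dl(): rescale from 2^20 units to 1024 capacity units. */
	unsigned long util_dl = (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;

	printf("running_bw = %llu, util_dl = %lu (~10%% of 1024)\n",
	       running_bw, util_dl);	/* prints 104857 and 102 */
	return 0;
}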
