@@ -179,12 +179,17 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long cfs_max;
+	unsigned long util_cfs = cpu_util_cfs(rq);
+	unsigned long util_dl = cpu_util_dl(rq);
 
-	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
+	*max = arch_scale_cpu_capacity(NULL, cpu);
 
-	*util = min(rq->cfs.avg.util_avg, cfs_max);
-	*max = cfs_max;
+	/*
+	 * Ideally we would like to set util_dl as min/guaranteed freq and
+	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
+	 * ready for such an interface. So, we only do the latter for now.
+	 */
+	*util = min(util_cfs + util_dl, *max);
 }
 
 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
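This hunk leans on two helpers that are not visible in the diff; they are introduced in kernel/sched/sched.h by the same series. A sketch of what they are assumed to look like (the bodies below are an illustration, not part of this commit):

static inline unsigned long cpu_util_cfs(struct rq *rq)
{
	/* PELT utilization of the CFS runqueue */
	return rq->cfs.avg.util_avg;
}

static inline unsigned long cpu_util_dl(struct rq *rq)
{
	/*
	 * Deadline running bandwidth, rescaled from BW_SHIFT fixed
	 * point to the SCHED_CAPACITY_SCALE range used by CFS util.
	 */
	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

With DL contributing a real utilization figure, the governor can request a frequency proportional to deadline load instead of pinning the CPU at maximum whenever a DL task runs.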
@@ -271,7 +276,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	busy = sugov_cpu_is_busy(sg_cpu);
 
-	if (flags & SCHED_CPUFREQ_RT_DL) {
+	if (flags & SCHED_CPUFREQ_RT) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
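Dropping the _DL half of the test is the point of the patch: deadline utilization now reaches the governor through sugov_get_util(), so only RT still needs the jump-to-max escape hatch. For context, the flag bits are assumed to be defined along these lines in include/linux/sched/cpufreq.h of this era:

#define SCHED_CPUFREQ_RT	(1U << 0)
#define SCHED_CPUFREQ_DL	(1U << 1)
#define SCHED_CPUFREQ_IOWAIT	(1U << 2)
#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)

Testing SCHED_CPUFREQ_RT alone therefore still forces max_freq for RT updates, while SCHED_CPUFREQ_DL updates fall through to the utilization path.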
@@ -316,7 +321,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
-		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
+		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
 			return policy->cpuinfo.max_freq;
 
 		j_util = j_sg_cpu->util;
@@ -352,7 +357,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	sg_cpu->last_update = time;
 
 	if (sugov_should_update_freq(sg_policy, time)) {
-		if (flags & SCHED_CPUFREQ_RT_DL)
+		if (flags & SCHED_CPUFREQ_RT)
 			next_f = sg_policy->policy->cpuinfo.max_freq;
 		else
 			next_f = sugov_next_freq_shared(sg_cpu, time);
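The same substitution repeats across all three update paths; whenever the RT bit is clear, next_f is derived from the utilization aggregated in sugov_get_util(). A toy, userspace-compilable model of that mapping, assuming the stock get_next_freq() headroom formula freq = (freq + (freq >> 2)) * util / max (all values made up for illustration):

#include <stdio.h>

static unsigned long toy_next_freq(unsigned long util_cfs,
				   unsigned long util_dl,
				   unsigned long max,
				   unsigned long max_freq)
{
	unsigned long util = util_cfs + util_dl;

	if (util > max)
		util = max;	/* the min() clamp in sugov_get_util() */

	/* 1.25x headroom so the CPU is not run exactly at saturation */
	return (max_freq + (max_freq >> 2)) * util / max;
}

int main(void)
{
	/* 300/1024 CFS + 100/1024 DL on a 2 GHz CPU -> ~977 MHz */
	printf("%lu kHz\n", toy_next_freq(300, 100, 1024, 2000000));
	return 0;
}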
@@ -382,9 +387,9 @@ static void sugov_irq_work(struct irq_work *irq_work)
 	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
 
 	/*
-	 * For RT and deadline tasks, the schedutil governor shoots the
-	 * frequency to maximum. Special care must be taken to ensure that this
-	 * kthread doesn't result in the same behavior.
+	 * For RT tasks, the schedutil governor shoots the frequency to maximum.
+	 * Special care must be taken to ensure that this kthread doesn't result
+	 * in the same behavior.
 	 *
 	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
 	 * updated only at the end of the sugov_work() function and before that