Skip to content

Commit f707e40

Browse files
committed
Merge tag 'sched-urgent-2023-10-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc scheduler fixes from Ingo Molnar: - Two EEVDF fixes: one to fix sysctl_sched_base_slice propagation, and one to fix an avg_vruntime() corner case. - A cpufreq frequency scaling fix * tag 'sched-urgent-2023-10-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: cpufreq: schedutil: Update next_freq when cpufreq_limits change sched/eevdf: Fix avg_vruntime() sched/eevdf: Also update slice on placement
2 parents 7e20d34 + 9e0bc36 commit f707e40

File tree

2 files changed

+15
-4
lines changed

2 files changed

+15
-4
lines changed

kernel/sched/cpufreq_schedutil.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -350,7 +350,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
350350
* Except when the rq is capped by uclamp_max.
351351
*/
352352
if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
353-
sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
353+
sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
354+
!sg_policy->need_freq_update) {
354355
next_f = sg_policy->next_freq;
355356

356357
/* Restore cached freq as next_freq has changed */

kernel/sched/fair.c

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -664,6 +664,10 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
664664
cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
665665
}
666666

667+
/*
668+
* Specifically: avg_runtime() + 0 must result in entity_eligible() := true
669+
* For this to be so, the result of this function must have a left bias.
670+
*/
667671
u64 avg_vruntime(struct cfs_rq *cfs_rq)
668672
{
669673
struct sched_entity *curr = cfs_rq->curr;
@@ -677,8 +681,12 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
677681
load += weight;
678682
}
679683

680-
if (load)
684+
if (load) {
685+
/* sign flips effective floor / ceil */
686+
if (avg < 0)
687+
avg -= (load - 1);
681688
avg = div_s64(avg, load);
689+
}
682690

683691
return cfs_rq->min_vruntime + avg;
684692
}
@@ -4919,10 +4927,12 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
49194927
static void
49204928
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
49214929
{
4922-
u64 vslice = calc_delta_fair(se->slice, se);
4923-
u64 vruntime = avg_vruntime(cfs_rq);
4930+
u64 vslice, vruntime = avg_vruntime(cfs_rq);
49244931
s64 lag = 0;
49254932

4933+
se->slice = sysctl_sched_base_slice;
4934+
vslice = calc_delta_fair(se->slice, se);
4935+
49264936
/*
49274937
* Due to how V is constructed as the weighted average of entities,
49284938
* adding tasks with positive lag, or removing tasks with negative lag

0 commit comments

Comments
 (0)