Commit 0c0bd34

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes, most of them SCHED_DEADLINE fallout"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Prevent rt_time growth to infinity
  sched/deadline: Switch CPU's presence test order
  sched/deadline: Cleanup RT leftovers from {inc/dec}_dl_migration
  sched: Fix double normalization of vruntime
2 parents 148b59c + faa5993 commit 0c0bd34

4 files changed: 20 additions & 10 deletions

kernel/sched/cpudeadline.c

Lines changed: 2 additions & 2 deletions
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-        WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
+        WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
         if (dl_time_before(new_dl, cp->elements[idx].dl)) {
                 cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
         }
 
 out:
-        WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
+        WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
         return best_cpu;
 }
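
The swapped operands in both WARN_ON() calls matter because C's || and && short-circuit left to right: testing the sentinel (idx == IDX_INVALID, or best_cpu != -1) first guarantees cpu_present() is never evaluated with an out-of-range index. A minimal userspace sketch of the same guard pattern, with invented stub names and values (not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define IDX_INVALID -1
#define NR_CPUS 8

static bool present[NR_CPUS] = { true, true, true, true };

/* Hypothetical stand-in for cpu_present(): indexing it with -1 would be
 * an out-of-bounds read, so it must never see IDX_INVALID. */
static bool cpu_present_stub(int idx)
{
        return present[idx];
}

int main(void)
{
        int idx = IDX_INVALID;

        /* Pre-fix order would call cpu_present_stub(-1) first:
         *     if (!cpu_present_stub(idx) || idx == IDX_INVALID) ...
         * Fixed order short-circuits on the sentinel; the stub never runs. */
        if (idx == IDX_INVALID || !cpu_present_stub(idx))
                printf("invalid or absent CPU, warned safely\n");

        return 0;
}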

kernel/sched/deadline.c

Lines changed: 6 additions & 4 deletions
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
         struct task_struct *p = dl_task_of(dl_se);
-        dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
         if (p->nr_cpus_allowed > 1)
                 dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
         struct task_struct *p = dl_task_of(dl_se);
-        dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
         if (p->nr_cpus_allowed > 1)
                 dl_rq->dl_nr_migratory--;
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
         return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
                 struct rt_rq *rt_rq = &rq->rt;
 
                 raw_spin_lock(&rt_rq->rt_runtime_lock);
-                rt_rq->rt_time += delta_exec;
                 /*
                  * We'll let actual RT tasks worry about the overflow here, we
-                 * have our own CBS to keep us inline -- see above.
+                 * have our own CBS to keep us inline; only account when RT
+                 * bandwidth is relevant.
                  */
+                if (sched_rt_bandwidth_account(rt_rq))
+                        rt_rq->rt_time += delta_exec;
                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
         }
 }
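
Two independent fixes land in this file. The deleted dl_rq = &rq_of_dl_rq(dl_rq)->dl; lines are leftovers copied from the RT code, where group scheduling can hand these helpers a child runqueue and the root has to be re-derived; SCHED_DEADLINE has no task groups, so the reassignment merely clobbered the argument. The update_curr_dl() change is the rt_time fix: deadline tasks charge their runtime to the shared RT bandwidth pool, but when no RT task is running the replenishment timer never fires and nothing decays rt_time, so unconditional charging grows it without bound. A toy model of the gated accounting (names mirror the patch, all values invented):

#include <stdbool.h>
#include <stdio.h>

static unsigned long long rt_time;                    /* consumed this period */
static const unsigned long long rt_runtime = 950000;  /* budget per period */
static bool period_timer_active;                      /* no RT tasks: never armed */

/* Mirrors the sched_rt_bandwidth_account() predicate: charge only while
 * the replenishment timer runs or the budget is not yet exhausted. */
static bool bandwidth_account(void)
{
        return period_timer_active || rt_time < rt_runtime;
}

int main(void)
{
        /* A deadline task charging 1000 units per update, indefinitely. */
        for (int i = 0; i < 2000; i++) {
                unsigned long long delta_exec = 1000;

                if (bandwidth_account())
                        rt_time += delta_exec;  /* stops at the budget */
        }
        printf("rt_time = %llu (capped at %llu, no unbounded growth)\n",
               rt_time, rt_runtime);
        return 0;
}

Without the gate, the same loop leaves rt_time at 2000000 and still climbing; with it, accounting stops once the budget is consumed and no timer is pending to replenish it.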

kernel/sched/fair.c

Lines changed: 4 additions & 4 deletions
@@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
         struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
         /*
-         * Ensure the task's vruntime is normalized, so that when its
+         * Ensure the task's vruntime is normalized, so that when it's
          * switched back to the fair class the enqueue_entity(.flags=0) will
          * do the right thing.
          *
-         * If it was on_rq, then the dequeue_entity(.flags=0) will already
-         * have normalized the vruntime, if it was !on_rq, then only when
+         * If it's on_rq, then the dequeue_entity(.flags=0) will already
+         * have normalized the vruntime, if it's !on_rq, then only when
          * the task is sleeping will it still have non-normalized vruntime.
          */
-        if (!se->on_rq && p->state != TASK_RUNNING) {
+        if (!p->on_rq && p->state != TASK_RUNNING) {
                 /*
                  * Fix up our vruntime so that the current sleep doesn't
                  * cause 'unlimited' sleep bonus.
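
The change from se->on_rq to p->on_rq is the "double normalization" fix named in the merge. When a queued task leaves the fair class (e.g. via sched_setscheduler()), dequeue_entity() has already subtracted min_vruntime and cleared the entity-level flag, so testing se->on_rq here sees zero and subtracts min_vruntime a second time; the task-level p->on_rq still records that the task is queued. A toy model of the difference, with invented names and numbers:

#include <stdio.h>

struct task_model {
        long long vruntime;
        int p_on_rq;   /* task-level queued state (p->on_rq) */
        int se_on_rq;  /* entity flag, already cleared by the dequeue */
};

/* Models switched_from_fair(): normalize only if the task really is
 * off the runqueue (sleeping), judged by the chosen flag. */
static void switched_from(struct task_model *t, long long min_vruntime,
                          int use_task_flag)
{
        int on_rq = use_task_flag ? t->p_on_rq : t->se_on_rq;

        if (!on_rq)
                t->vruntime -= min_vruntime;  /* the normalization step */
}

int main(void)
{
        /* A queued task mid-class-switch: the dequeue already normalized
         * its vruntime once and cleared se_on_rq, but p_on_rq is still 1. */
        struct task_model buggy = { .vruntime = 500, .p_on_rq = 1, .se_on_rq = 0 };
        struct task_model fixed = buggy;

        switched_from(&buggy, 1000, 0);  /* checks se_on_rq: subtracts again */
        switched_from(&fixed, 1000, 1);  /* checks p_on_rq: leaves it alone */

        printf("buggy vruntime: %lld, fixed vruntime: %lld\n",
               buggy.vruntime, fixed.vruntime);
        return 0;
}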

kernel/sched/rt.c

Lines changed: 8 additions & 0 deletions
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+        return (hrtimer_active(&rt_b->rt_period_timer) ||
+                rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
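
The helper is defined here in rt.c, next to the rt_bandwidth state it inspects (hrtimer_active() reports whether the replenishment timer is queued or running), while deadline.c pulls it in with a local extern declaration rather than a shared header. A minimal two-file sketch of that split, with invented file and symbol names (compile with cc rt_stub.c dl_stub.c):

/* rt_stub.c: the predicate lives beside the bandwidth state it reads. */
#include <stdbool.h>

static unsigned long long rt_time = 0, rt_runtime = 950000;
static bool timer_active = false;

bool bandwidth_account(void)
{
        return timer_active || rt_time < rt_runtime;
}

/* dl_stub.c: the caller declares the symbol locally, as deadline.c does. */
#include <stdbool.h>
#include <stdio.h>

extern bool bandwidth_account(void);

int main(void)
{
        printf("account now? %s\n", bandwidth_account() ? "yes" : "no");
        return 0;
}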
