Commit ae3e10a

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes:

   - a deadline scheduler related bug fix which triggered a kernel
     warning

   - an RT_RUNTIME_SHARE fix

   - a stop_machine preemption fix

   - a potential NULL dereference fix in sched_domain_debug_one()"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/rt: Restore rt_runtime after disabling RT_RUNTIME_SHARE
  sched/deadline: Update rq_clock of later_rq when pushing a task
  stop_machine: Disable preemption after queueing stopper threads
  sched/topology: Check variable group before dereferencing it
2 parents 0634922 + f3d133e

4 files changed: +19 −3 lines

kernel/sched/deadline.c

Lines changed: 7 additions & 1 deletion
@@ -2090,8 +2090,14 @@ static int push_dl_task(struct rq *rq)
 	sub_rq_bw(&next_task->dl, &rq->dl);
 	set_task_cpu(next_task, later_rq->cpu);
 	add_rq_bw(&next_task->dl, &later_rq->dl);
+
+	/*
+	 * Update the later_rq clock here, because the clock is used
+	 * by the cpufreq_update_util() inside __add_running_bw().
+	 */
+	update_rq_clock(later_rq);
 	add_running_bw(&next_task->dl, &later_rq->dl);
-	activate_task(later_rq, next_task, 0);
+	activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
 	ret = 1;
 
 	resched_curr(later_rq);
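
The fix pairs update_rq_clock() with ENQUEUE_NOCLOCK: the caller refreshes later_rq's clock once, before add_running_bw() (whose __add_running_bw() path feeds the clock into cpufreq_update_util()), and the flag tells activate_task() to skip a second, redundant clock update. A minimal user-space sketch of this caller-updates-then-flags pattern, with every name (rq_sketch, update_clock, ENQ_NOCLOCK) invented for illustration:

/* Illustrative user-space analogue, not kernel code: the caller
 * refreshes a cached clock once, then passes a NOCLOCK-style flag so
 * the enqueue path does not refresh it again. */
#include <stdio.h>
#include <time.h>

#define ENQ_NOCLOCK 0x1

struct rq_sketch {
        long long clock_ns;             /* cached "runqueue" clock */
};

static void update_clock(struct rq_sketch *rq)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        rq->clock_ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void enqueue(struct rq_sketch *rq, int flags)
{
        if (!(flags & ENQ_NOCLOCK))
                update_clock(rq);       /* only if the caller did not */
        printf("enqueued at clock %lld\n", rq->clock_ns);
}

int main(void)
{
        struct rq_sketch rq = { 0 };

        /* The caller needs a fresh clock before the enqueue (as the
         * fix does with update_rq_clock()), so it updates once and
         * tells enqueue() to skip its own update. */
        update_clock(&rq);
        enqueue(&rq, ENQ_NOCLOCK);
        return 0;
}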

kernel/sched/rt.c

Lines changed: 2 additions & 0 deletions
@@ -836,6 +836,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 	 * can be time-consuming. Try to avoid it when possible.
 	 */
 	raw_spin_lock(&rt_rq->rt_runtime_lock);
+	if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
+		rt_rq->rt_runtime = rt_b->rt_runtime;
 	skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
 	raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	if (skip)
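
With RT_RUNTIME_SHARE enabled, an rt_rq can borrow runtime from other CPUs and end up with rt_runtime above its configured budget. The two added lines hand that surplus back once the feature is disabled, restoring the global value on every period tick; the RUNTIME_INF test leaves unthrottled queues untouched. A hedged stand-alone sketch of the reset (the names and the RUNTIME_INF_SKETCH value are invented here, not kernel definitions):

/* User-space sketch of the per-period reset: when sharing is off,
 * each queue's runtime is pinned back to the global budget unless it
 * is marked unlimited. */
#include <stdbool.h>
#include <stdio.h>

#define RUNTIME_INF_SKETCH (-1LL)       /* stand-in for the kernel's RUNTIME_INF */

static void period_timer_tick(long long *rq_runtime, long long global_runtime,
                              bool runtime_share)
{
        /* Mirrors the added lines: undo any earlier borrowing. */
        if (!runtime_share && *rq_runtime != RUNTIME_INF_SKETCH)
                *rq_runtime = global_runtime;
}

int main(void)
{
        long long rt_runtime = 1200000; /* inflated by earlier borrowing */

        period_timer_tick(&rt_runtime, 950000, false);
        printf("rt_runtime restored to %lld\n", rt_runtime);
        return 0;
}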

kernel/sched/topology.c

Lines changed: 1 addition & 1 deletion
@@ -47,7 +47,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
 	}
-	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
+	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
 	}
 
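
The one-line change relies on C's short-circuiting &&: when group is NULL, the right-hand operand is never evaluated, so sched_group_span() is never called on a NULL pointer. A trivial stand-alone illustration (group_sketch and span_of() are invented stand-ins, not the kernel helpers):

#include <stdio.h>

struct group_sketch {
        unsigned long span;
};

/* Would crash if ever called with g == NULL. */
static unsigned long span_of(struct group_sketch *g)
{
        return g->span;
}

int main(void)
{
        struct group_sketch *group = NULL;

        /* && short-circuits: span_of() is skipped when group is NULL. */
        if (group && span_of(group) == 0)
                printf("ERROR: empty group span\n");
        else
                printf("group is NULL or populated; nothing dereferenced\n");
        return 0;
}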

kernel/stop_machine.c

Lines changed: 9 additions & 1 deletion
@@ -260,6 +260,15 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 	err = 0;
 	__cpu_stop_queue_work(stopper1, work1, &wakeq);
 	__cpu_stop_queue_work(stopper2, work2, &wakeq);
+	/*
+	 * The waking up of stopper threads has to happen
+	 * in the same scheduling context as the queueing.
+	 * Otherwise, there is a possibility of one of the
+	 * above stoppers being woken up by another CPU,
+	 * and preempting us. This will cause us to not
+	 * wake up the other stopper forever.
+	 */
+	preempt_disable();
 unlock:
 	raw_spin_unlock(&stopper2->lock);
 	raw_spin_unlock_irq(&stopper1->lock);
@@ -271,7 +280,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 	}
 
 	if (!err) {
-		preempt_disable();
 		wake_up_q(&wakeq);
 		preempt_enable();
 	}
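
After this change, preemption is disabled while both stopper locks are still held, and preempt_enable() only runs after wake_up_q(), so a freshly woken stopper can never preempt the waker between the two wakeups. A condensed sketch of the resulting pairing; wake_both_stoppers() is an invented helper for illustration, not a function in the tree:

/*
 * Hedged sketch of the locking/preemption pairing the fix establishes.
 * preempt_disable() is entered before the locks are dropped, and
 * preempt_enable() only after wake_up_q(), so the waker cannot be
 * preempted by a stopper it has just woken until both queued stoppers
 * have been woken.
 */
#include <linux/preempt.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

static void wake_both_stoppers(struct wake_q_head *wakeq,
			       raw_spinlock_t *lock1, raw_spinlock_t *lock2)
{
	preempt_disable();		/* still holding both locks */
	raw_spin_unlock(lock2);
	raw_spin_unlock_irq(lock1);	/* also re-enables interrupts */

	wake_up_q(wakeq);		/* wakes every task queued earlier */
	preempt_enable();		/* safe: both wakeups are done */
}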
