Skip to content

Commit e4099a5

Browse files
Peter Zijlstra authored and Ingo Molnar committed
sched/deadline: Fix up the smp-affinity mask tests
For now deadline tasks are not allowed to set smp affinity; however the current tests are wrong, cure this.

The test in __sched_setscheduler() also uses an on-stack cpumask_t which is a no-no.

Change both tests to use cpumask_subset() such that we test the root domain span to be a subset of the cpus_allowed mask. This way we're sure the tasks can always run on all CPUs they can be balanced over, and have no effective affinity constraints.

Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 6bfd6d7 commit e4099a5

File tree

1 file changed

+9
-19
lines changed

1 file changed

+9
-19
lines changed

kernel/sched/core.c

Lines changed: 9 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -3384,23 +3384,14 @@ static int __sched_setscheduler(struct task_struct *p,
33843384
#ifdef CONFIG_SMP
33853385
if (dl_bandwidth_enabled() && dl_policy(policy)) {
33863386
cpumask_t *span = rq->rd->span;
3387-
cpumask_t act_affinity;
3388-
3389-
/*
3390-
* cpus_allowed mask is statically initialized with
3391-
* CPU_MASK_ALL, span is instead dynamic. Here we
3392-
* compute the "dynamic" affinity of a task.
3393-
*/
3394-
cpumask_and(&act_affinity, &p->cpus_allowed,
3395-
cpu_active_mask);
33963387

33973388
/*
33983389
* Don't allow tasks with an affinity mask smaller than
33993390
* the entire root_domain to become SCHED_DEADLINE. We
34003391
* will also fail if there's no bandwidth available.
34013392
*/
3402-
if (!cpumask_equal(&act_affinity, span) ||
3403-
rq->rd->dl_bw.bw == 0) {
3393+
if (!cpumask_subset(span, &p->cpus_allowed) ||
3394+
rq->rd->dl_bw.bw == 0) {
34043395
task_rq_unlock(rq, p, &flags);
34053396
return -EPERM;
34063397
}
@@ -3420,8 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
34203411
* of a SCHED_DEADLINE task) we need to check if enough bandwidth
34213412
* is available.
34223413
*/
3423-
if ((dl_policy(policy) || dl_task(p)) &&
3424-
dl_overflow(p, policy, attr)) {
3414+
if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
34253415
task_rq_unlock(rq, p, &flags);
34263416
return -EBUSY;
34273417
}
@@ -3860,6 +3850,10 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
38603850
if (retval)
38613851
goto out_unlock;
38623852

3853+
3854+
cpuset_cpus_allowed(p, cpus_allowed);
3855+
cpumask_and(new_mask, in_mask, cpus_allowed);
3856+
38633857
/*
38643858
* Since bandwidth control happens on root_domain basis,
38653859
* if admission test is enabled, we only admit -deadline
@@ -3870,16 +3864,12 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
38703864
if (task_has_dl_policy(p)) {
38713865
const struct cpumask *span = task_rq(p)->rd->span;
38723866

3873-
if (dl_bandwidth_enabled() &&
3874-
!cpumask_equal(in_mask, span)) {
3867+
if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
38753868
retval = -EBUSY;
38763869
goto out_unlock;
38773870
}
38783871
}
38793872
#endif
3880-
3881-
cpuset_cpus_allowed(p, cpus_allowed);
3882-
cpumask_and(new_mask, in_mask, cpus_allowed);
38833873
again:
38843874
retval = set_cpus_allowed_ptr(p, new_mask);
38853875

@@ -4535,7 +4525,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
45354525
* When dealing with a -deadline task, we have to check if moving it to
45364526
* a new CPU is possible or not. In fact, this is only true iff there
45374527
* is enough bandwidth available on such CPU, otherwise we want the
4538-
* whole migration progedure to fail over.
4528+
* whole migration procedure to fail over.
45394529
*/
45404530
static inline
45414531
bool set_task_cpu_dl(struct task_struct *p, unsigned int cpu)

0 commit comments

Comments
 (0)