Skip to content

Commit 175f0e2

Browse files
Peter Zijlstra authored and Ingo Molnar committed
sched/core: Fix rules for running on online && !active CPUs
As already enforced by the WARN() in __set_cpus_allowed_ptr(), the rules for running on an online && !active CPU are stricter than just being a kthread, you need to be a per-cpu kthread. If you're not strictly per-CPU, you have better CPUs to run on and don't need the partially booted one to get your work done. The exception is to allow smpboot threads to bootstrap the CPU itself and get kernel 'services' initialized before we allow userspace on it. Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Paul E. McKenney <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Steven Rostedt <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Thomas Gleixner <[email protected]> Fixes: 955dbdf ("sched: Allow migrating kthreads into online but inactive CPUs") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent 786b71f commit 175f0e2

File tree

1 file changed

+30
-12
lines changed

1 file changed

+30
-12
lines changed

kernel/sched/core.c

Lines changed: 30 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -881,6 +881,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
881881
}
882882

883883
#ifdef CONFIG_SMP
884+
885+
static inline bool is_per_cpu_kthread(struct task_struct *p)
886+
{
887+
if (!(p->flags & PF_KTHREAD))
888+
return false;
889+
890+
if (p->nr_cpus_allowed != 1)
891+
return false;
892+
893+
return true;
894+
}
895+
896+
/*
897+
* Per-CPU kthreads are allowed to run on !actie && online CPUs, see
898+
* __set_cpus_allowed_ptr() and select_fallback_rq().
899+
*/
900+
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
901+
{
902+
if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
903+
return false;
904+
905+
if (is_per_cpu_kthread(p))
906+
return cpu_online(cpu);
907+
908+
return cpu_active(cpu);
909+
}
910+
884911
/*
885912
* This is how migration works:
886913
*
@@ -938,16 +965,8 @@ struct migration_arg {
938965
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
939966
struct task_struct *p, int dest_cpu)
940967
{
941-
if (p->flags & PF_KTHREAD) {
942-
if (unlikely(!cpu_online(dest_cpu)))
943-
return rq;
944-
} else {
945-
if (unlikely(!cpu_active(dest_cpu)))
946-
return rq;
947-
}
948-
949968
/* Affinity changed (again). */
950-
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
969+
if (!is_cpu_allowed(p, dest_cpu))
951970
return rq;
952971

953972
update_rq_clock(rq);
@@ -1476,10 +1495,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
14761495
for (;;) {
14771496
/* Any allowed, online CPU? */
14781497
for_each_cpu(dest_cpu, &p->cpus_allowed) {
1479-
if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
1480-
continue;
1481-
if (!cpu_online(dest_cpu))
1498+
if (!is_cpu_allowed(p, dest_cpu))
14821499
continue;
1500+
14831501
goto out;
14841502
}
14851503

0 commit comments

Comments
 (0)