
Commit fe2eea3

Christoph Lameter authored and Linus Torvalds committed
[PATCH] sched: disable interrupts for locking in load_balance()
Interrupts must be disabled for request queue locks if we want to run load_balance() with interrupts enabled.

Signed-off-by: Christoph Lameter <[email protected]>
Cc: Peter Williams <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: "Siddha, Suresh B" <[email protected]>
Cc: "Chen, Kenneth W" <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
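For context, the hazard this patch closes is the classic irq-unsafe spinlock deadlock. The runqueue locks (the "request queue locks" of the message above) are also taken from interrupt context, since scheduler_tick() runs off the timer interrupt and takes rq->lock. Taking such a lock with plain spin_lock() while interrupts are enabled can therefore deadlock: the tick can fire on the same CPU and spin forever on a lock that CPU already holds. A minimal sketch of the safe pattern; the lock and both functions here are hypothetical illustrations, not kernel code:

/* Illustration of the irq-safe locking pattern this patch applies.
 * example_lock and both functions are hypothetical.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Runs in interrupt context, e.g. from the timer tick. */
static void example_irq_path(void)
{
        spin_lock(&example_lock);       /* interrupts already disabled here */
        /* ... update shared state ... */
        spin_unlock(&example_lock);
}

/* Runs in process context with interrupts enabled. */
static void example_process_path(void)
{
        unsigned long flags;

        /*
         * Plain spin_lock() would be a deadlock risk: the interrupt
         * could arrive on this CPU while the lock is held, and its
         * handler would spin forever on a lock its own CPU owns.
         */
        spin_lock_irqsave(&example_lock, flags);
        /* ... update shared state ... */
        spin_unlock_irqrestore(&example_lock, flags);
}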
1 parent 4211a9a · commit fe2eea3

File tree

1 file changed, +6 −5 lines changed

kernel/sched.c

Lines changed: 6 additions & 5 deletions
@@ -2546,8 +2546,6 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
- *
- * Called with this_rq unlocked.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
                         struct sched_domain *sd, enum idle_type idle)
@@ -2557,6 +2555,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
         unsigned long imbalance;
         struct rq *busiest;
         cpumask_t cpus = CPU_MASK_ALL;
+        unsigned long flags;
 
         /*
          * When power savings policy is enabled for the parent domain, idle
@@ -2596,11 +2595,13 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                  * still unbalanced. nr_moved simply stays zero, so it is
                  * correctly treated as an imbalance.
                  */
+                local_irq_save(flags);
                 double_rq_lock(this_rq, busiest);
                 nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                 minus_1_or_zero(busiest->nr_running),
                                 imbalance, sd, idle, &all_pinned);
                 double_rq_unlock(this_rq, busiest);
+                local_irq_restore(flags);
 
                 /* All tasks on this runqueue were pinned by CPU affinity */
                 if (unlikely(all_pinned)) {
@@ -2617,13 +2618,13 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
         if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-                spin_lock(&busiest->lock);
+                spin_lock_irqsave(&busiest->lock, flags);
 
                 /* don't kick the migration_thread, if the curr
                  * task on busiest cpu can't be moved to this_cpu
                  */
                 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
-                        spin_unlock(&busiest->lock);
+                        spin_unlock_irqrestore(&busiest->lock, flags);
                         all_pinned = 1;
                         goto out_one_pinned;
                 }
@@ -2633,7 +2634,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                         busiest->push_cpu = this_cpu;
                         active_balance = 1;
                 }
-                spin_unlock(&busiest->lock);
+                spin_unlock_irqrestore(&busiest->lock, flags);
                 if (active_balance)
                         wake_up_process(busiest->migration_thread);
 
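Why the caller has to disable interrupts itself: double_rq_lock() takes the two runqueue locks with plain spin_lock(), in address order so that two CPUs balancing against each other cannot deadlock ABBA-style, and it never touches the interrupt state. A paraphrased sketch of that helper from the same era of kernel/sched.c (condensed, not a verbatim excerpt):

/* Paraphrased sketch of double_rq_lock(); annotations and details
 * of the real function are omitted.
 */
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
        if (rq1 == rq2) {
                spin_lock(&rq1->lock);          /* one lock suffices */
        } else if (rq1 < rq2) {                 /* lower address first */
                spin_lock(&rq1->lock);
                spin_lock(&rq2->lock);
        } else {
                spin_lock(&rq2->lock);
                spin_lock(&rq1->lock);
        }
}

Since neither spin_lock() nor the ordering logic disables interrupts, the caller must bracket the pair with local_irq_save()/local_irq_restore(), which is exactly what the hunk around move_tasks() adds; the single-lock paths in the same function switch to spin_lock_irqsave()/spin_unlock_irqrestore() for the same reason.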