@@ -2546,8 +2546,6 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
- *
- * Called with this_rq unlocked.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum idle_type idle)
@@ -2557,6 +2555,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long imbalance;
 	struct rq *busiest;
 	cpumask_t cpus = CPU_MASK_ALL;
+	unsigned long flags;
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -2596,11 +2595,13 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		 * still unbalanced. nr_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
+		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 				      minus_1_or_zero(busiest->nr_running),
 				      imbalance, sd, idle, &all_pinned);
 		double_rq_unlock(this_rq, busiest);
+		local_irq_restore(flags);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
 		if (unlikely(all_pinned)) {
@@ -2617,13 +2618,13 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-		spin_lock(&busiest->lock);
+		spin_lock_irqsave(&busiest->lock, flags);
 
 		/* don't kick the migration_thread, if the curr
 		 * task on busiest cpu can't be moved to this_cpu
 		 */
 		if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
-			spin_unlock(&busiest->lock);
+			spin_unlock_irqrestore(&busiest->lock, flags);
 			all_pinned = 1;
 			goto out_one_pinned;
 		}
@@ -2633,7 +2634,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 			busiest->push_cpu = this_cpu;
 			active_balance = 1;
 		}
-		spin_unlock(&busiest->lock);
+		spin_unlock_irqrestore(&busiest->lock, flags);
 		if (active_balance)
 			wake_up_process(busiest->migration_thread);
0 commit comments