Commit 5d6523e

Peter Zijlstra authored and Ingo Molnar committed
sched: Fix load-balance wreckage
Commit 367456c ("sched: Ditch per cgroup task lists for load-balancing")
completely wrecked load-balancing due to a few silly mistakes.

Correct those and remove more pointless code.

Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 2e5b5b3 commit 5d6523e
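For orientation before reading the diff: after this change move_tasks() walks the per-rq cfs_tasks list directly, pulling the task at the head and rotating any task it declines to migrate to the tail, with env->loop_max bounding the walk. Below is a minimal user-space sketch of that walk-and-rotate pattern, not kernel code; the task/list types, the pop_head()/push_tail() helpers and the example loads are invented for illustration.

/* Minimal user-space analogue of the rewritten move_tasks() walk.
 * "task" and the list are hypothetical stand-ins, not kernel types. */
#include <stdio.h>
#include <stdbool.h>

struct task {
        const char *name;
        long load;              /* weighted load contribution */
        bool pinned;            /* analogue of "cannot migrate" */
        struct task *next;
};

struct rq_list {
        struct task *head, *tail;
};

static struct task *pop_head(struct rq_list *q)
{
        struct task *t = q->head;
        q->head = t->next;
        if (!q->head)
                q->tail = NULL;
        t->next = NULL;
        return t;
}

static void push_tail(struct rq_list *q, struct task *t)
{
        t->next = NULL;
        if (q->tail)
                q->tail->next = t;
        else
                q->head = t;
        q->tail = t;
}

/* Pull tasks until the load budget is spent; skipped tasks rotate to the
 * tail, and loop_max bounds the walk so rotation cannot loop forever. */
static int move_tasks_sketch(struct rq_list *src, long load_move, int loop_max)
{
        int pulled = 0, loop = 0;

        while (src->head && load_move > 0 && ++loop <= loop_max) {
                struct task *p = pop_head(src);

                if (p->pinned || (p->load / 2) > load_move) {
                        push_tail(src, p);      /* skip: rotate to the tail */
                        continue;
                }
                load_move -= p->load;           /* "migrate" the task */
                pulled++;
                printf("pulled %s (load %ld)\n", p->name, p->load);
        }
        return pulled;
}

int main(void)
{
        struct task c = { "C", 512, false, NULL };
        struct task b = { "B", 2048, true, &c };
        struct task a = { "A", 1024, false, &b };
        struct rq_list src = { &a, &c };

        printf("pulled %d tasks\n", move_tasks_sketch(&src, 1536, 16));
        return 0;
}

Compiled as ordinary C, this pulls the two unpinned tasks and reports the total; the pinned task is rotated to the tail, much as the skipped tasks are by the list_move_tail() call in the real function.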

File tree: 1 file changed (+39, -71 lines)


kernel/sched/fair.c

Lines changed: 39 additions & 71 deletions
@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
         update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
         if (entity_is_task(se))
-                list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+                list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
 #endif
         cfs_rq->nr_running++;
 }
@@ -3071,7 +3071,6 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
 
 #define LBF_ALL_PINNED  0x01
 #define LBF_NEED_BREAK  0x02
-#define LBF_ABORT       0x04
 
 struct lb_env {
         struct sched_domain     *sd;
@@ -3083,7 +3082,7 @@ struct lb_env {
         struct rq               *dst_rq;
 
         enum cpu_idle_type      idle;
-        unsigned long           max_load_move;
+        long                    load_move;
         unsigned int            flags;
 
         unsigned int            loop;
@@ -3216,80 +3215,86 @@ static int move_one_task(struct lb_env *env)
 
 static unsigned long task_h_load(struct task_struct *p);
 
-static unsigned long balance_tasks(struct lb_env *env)
+/*
+ * move_tasks tries to move up to load_move weighted load from busiest to
+ * this_rq, as part of a balancing operation within domain "sd".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+static int move_tasks(struct lb_env *env)
 {
-        long rem_load_move = env->max_load_move;
-        struct task_struct *p, *n;
+        struct list_head *tasks = &env->src_rq->cfs_tasks;
+        struct task_struct *p;
         unsigned long load;
         int pulled = 0;
 
-        if (env->max_load_move == 0)
-                goto out;
+        if (env->load_move <= 0)
+                return 0;
+
+        while (!list_empty(tasks)) {
+                p = list_first_entry(tasks, struct task_struct, se.group_node);
 
-        list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
                 env->loop++;
                 /* We've more or less seen every task there is, call it quits */
-                if (env->loop > env->loop_max) {
-                        env->flags |= LBF_ABORT;
+                if (env->loop > env->loop_max)
                         break;
-                }
-                /* take a beather every nr_migrate tasks */
+
+                /* take a breather every nr_migrate tasks */
                 if (env->loop > env->loop_break) {
                         env->loop_break += sysctl_sched_nr_migrate;
                         env->flags |= LBF_NEED_BREAK;
                         break;
                 }
 
-                if (throttled_lb_pair(task_group(p), env->src_rq->cpu,
-                                      env->dst_cpu))
+                if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
                         goto next;
 
                 load = task_h_load(p);
+
                 if (load < 16 && !env->sd->nr_balance_failed)
                         goto next;
 
-                if ((load * 2) > rem_load_move)
+                if ((load / 2) > env->load_move)
                         goto next;
 
                 if (!can_migrate_task(p, env))
                         goto next;
 
                 move_task(p, env);
                 pulled++;
-                rem_load_move -= load;
+                env->load_move -= load;
 
 #ifdef CONFIG_PREEMPT
                 /*
                  * NEWIDLE balancing is a source of latency, so preemptible
                  * kernels will stop after the first task is pulled to minimize
                  * the critical section.
                  */
-                if (env->idle == CPU_NEWLY_IDLE) {
-                        env->flags |= LBF_ABORT;
+                if (env->idle == CPU_NEWLY_IDLE)
                         break;
-                }
 #endif
 
                 /*
                  * We only want to steal up to the prescribed amount of
                  * weighted load.
                  */
-                if (rem_load_move <= 0)
+                if (env->load_move <= 0)
                         break;
 
                 continue;
 next:
-                list_move_tail(&p->se.group_node, &env->src_rq->cfs_tasks);
+                list_move_tail(&p->se.group_node, tasks);
         }
-out:
+
         /*
         * Right now, this is one of only two places move_task() is called,
         * so we can safely collect move_task() stats here rather than
         * inside move_task().
         */
         schedstat_add(env->sd, lb_gained[env->idle], pulled);
 
-        return env->max_load_move - rem_load_move;
+        return pulled;
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3399,43 +3404,6 @@ static unsigned long task_h_load(struct task_struct *p)
 }
 #endif
 
-/*
- * move_tasks tries to move up to max_load_move weighted load from busiest to
- * this_rq, as part of a balancing operation within domain "sd".
- * Returns 1 if successful and 0 otherwise.
- *
- * Called with both runqueues locked.
- */
-static int move_tasks(struct lb_env *env)
-{
-        unsigned long max_load_move = env->max_load_move;
-        unsigned long total_load_moved = 0, load_moved;
-
-        update_h_load(cpu_of(env->src_rq));
-        do {
-                env->max_load_move = max_load_move - total_load_moved;
-                load_moved = balance_tasks(env);
-                total_load_moved += load_moved;
-
-                if (env->flags & (LBF_NEED_BREAK|LBF_ABORT))
-                        break;
-
-#ifdef CONFIG_PREEMPT
-                /*
-                 * NEWIDLE balancing is a source of latency, so preemptible
-                 * kernels will stop after the first task is pulled to minimize
-                 * the critical section.
-                 */
-                if (env->idle == CPU_NEWLY_IDLE && env->dst_rq->nr_running) {
-                        env->flags |= LBF_ABORT;
-                        break;
-                }
-#endif
-        } while (load_moved && max_load_move > total_load_moved);
-
-        return total_load_moved > 0;
-}
-
 /********** Helpers for find_busiest_group ************************/
 /*
  * sd_lb_stats - Structure to store the statistics of a sched_domain
@@ -4477,31 +4445,31 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                 * correctly treated as an imbalance.
                 */
                env.flags |= LBF_ALL_PINNED;
-               env.max_load_move = imbalance;
+               env.load_move = imbalance;
                env.src_cpu = busiest->cpu;
                env.src_rq = busiest;
                env.loop_max = busiest->nr_running;
 
+more_balance:
                local_irq_save(flags);
                double_rq_lock(this_rq, busiest);
-               ld_moved = move_tasks(&env);
+               if (!env.loop)
+                       update_h_load(env.src_cpu);
+               ld_moved += move_tasks(&env);
                double_rq_unlock(this_rq, busiest);
                local_irq_restore(flags);
 
+               if (env.flags & LBF_NEED_BREAK) {
+                       env.flags &= ~LBF_NEED_BREAK;
+                       goto more_balance;
+               }
+
                /*
                 * some other cpu did the load balance for us.
                 */
                if (ld_moved && this_cpu != smp_processor_id())
                        resched_cpu(this_cpu);
 
-               if (env.flags & LBF_ABORT)
-                       goto out_balanced;
-
-               if (env.flags & LBF_NEED_BREAK) {
-                       env.flags &= ~LBF_NEED_BREAK;
-                       goto redo;
-               }
-
                /* All tasks on this runqueue were pinned by CPU affinity */
                if (unlikely(env.flags & LBF_ALL_PINNED)) {
                        cpumask_clear_cpu(cpu_of(busiest), cpus);
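The retry structure introduced in the hunk above (the more_balance label) replaces the old LBF_ABORT plumbing: when move_tasks() trips its nr_migrate batch limit it sets LBF_NEED_BREAK, and load_balance() clears the flag, retakes the locks and goes around again, accumulating ld_moved across passes. Below is a hedged user-space sketch of that bounded-batch-plus-retry shape; all names and the batch size are illustrative stand-ins, and the real locking and scheduler state are omitted.

/* User-space sketch of the NEED_BREAK retry pattern used above.
 * All names are illustrative; there is no real locking or scheduler here. */
#include <stdio.h>

#define LBF_NEED_BREAK 0x02

struct lb_env {
        unsigned int flags;
        unsigned int loop;        /* items examined so far, across retries */
        unsigned int loop_break;  /* take a breather after this many */
        unsigned int loop_max;    /* never examine more than this many */
        int remaining;            /* stand-in for the load still to move */
};

/* Process a bounded batch; ask the caller to retry when the batch limit
 * is hit before the work (or the item budget) runs out. */
static int move_tasks_batch(struct lb_env *env)
{
        int moved = 0;

        while (env->remaining > 0) {
                if (++env->loop > env->loop_max)
                        break;                          /* seen everything, give up */
                if (env->loop > env->loop_break) {
                        env->loop_break += 4;           /* analogue of sched_nr_migrate */
                        env->flags |= LBF_NEED_BREAK;   /* let the caller drop locks */
                        break;
                }
                env->remaining--;                       /* "move" one unit of load */
                moved++;
        }
        return moved;
}

int main(void)
{
        struct lb_env env = { .loop_break = 4, .loop_max = 32, .remaining = 10 };
        int ld_moved = 0;

more_balance:
        /* in the kernel this is where the runqueue locks are taken */
        ld_moved += move_tasks_batch(&env);
        if (env.flags & LBF_NEED_BREAK) {
                env.flags &= ~LBF_NEED_BREAK;           /* clear and retry, as in the diff */
                goto more_balance;
        }

        printf("moved %d units in %u iterations\n", ld_moved, env.loop);
        return 0;
}

The caller-side retry keeps the batching benefit the diff's "take a breather" comment describes, since in the kernel the runqueue locks and interrupts are released and retaken between passes, while env.loop persisting across retries is what makes the "if (!env.loop) update_h_load()" check in the diff run only on the first pass.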
