Skip to content

Commit f12e148

Browse files
author
Peter Zijlstra
committed
sched/fair: Prepare pick_next_task() for delayed dequeue
Delayed dequeue's natural end is when it gets picked again. Ensure pick_next_task() knows what to do with delayed tasks.

Note: this relies on the earlier patch that made pick_next_task() state invariant -- it will restart the pick on dequeue, because obviously the just-dequeued task is no longer eligible.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Tested-by: Valentin Schneider <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 2e0199d commit f12e148

File tree

1 file changed

+19
-4
lines changed

1 file changed

+19
-4
lines changed

kernel/sched/fair.c

Lines changed: 19 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5473,6 +5473,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
54735473
se->prev_sum_exec_runtime = se->sum_exec_runtime;
54745474
}
54755475

5476+
static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
5477+
54765478
/*
54775479
* Pick the next process, keeping these things in mind, in this order:
54785480
* 1) keep things fair between processes/task groups
@@ -5481,16 +5483,27 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
54815483
* 4) do not run the "skip" process, if something else is available
54825484
*/
54835485
static struct sched_entity *
5484-
pick_next_entity(struct cfs_rq *cfs_rq)
5486+
pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
54855487
{
54865488
/*
54875489
* Enabling NEXT_BUDDY will affect latency but not fairness.
54885490
*/
54895491
if (sched_feat(NEXT_BUDDY) &&
5490-
cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
5492+
cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) {
5493+
/* ->next will never be delayed */
5494+
SCHED_WARN_ON(cfs_rq->next->sched_delayed);
54915495
return cfs_rq->next;
5496+
}
5497+
5498+
struct sched_entity *se = pick_eevdf(cfs_rq);
5499+
if (se->sched_delayed) {
5500+
dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
5501+
SCHED_WARN_ON(se->sched_delayed);
5502+
SCHED_WARN_ON(se->on_rq);
54925503

5493-
return pick_eevdf(cfs_rq);
5504+
return NULL;
5505+
}
5506+
return se;
54945507
}
54955508

54965509
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -8507,7 +8520,9 @@ static struct task_struct *pick_task_fair(struct rq *rq)
85078520
if (unlikely(check_cfs_rq_runtime(cfs_rq)))
85088521
goto again;
85098522

8510-
se = pick_next_entity(cfs_rq);
8523+
se = pick_next_entity(rq, cfs_rq);
8524+
if (!se)
8525+
goto again;
85118526
cfs_rq = group_cfs_rq(se);
85128527
} while (cfs_rq);
85138528

0 commit comments

Comments
 (0)