2 files changed: 1 addition, 26 deletions
kernel/sched/fair.c

@@ -906,16 +906,6 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return slice;
 }
 
-/*
- * We calculate the vruntime slice of a to-be-inserted task.
- *
- * vs = s/w
- */
-static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	return calc_delta_fair(sched_slice(cfs_rq, se), se);
-}
-
 #include "pelt.h"
 #ifdef CONFIG_SMP
@@ -4862,16 +4852,7 @@ static inline bool entity_is_long_sleeper(struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	u64 vruntime = cfs_rq->min_vruntime;
-
-	/*
-	 * The 'current' period is already promised to the current tasks,
-	 * however the extra weight of the new task will slow them down a
-	 * little, place the new task so that it fits in the slot that
-	 * stays open at the end.
-	 */
-	if (initial && sched_feat(START_DEBIT))
-		vruntime += sched_vslice(cfs_rq, se);
+	u64 vruntime = avg_vruntime(cfs_rq);
 
 	/* sleeps up to a single latency don't count. */
 	if (!initial) {
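Note: the removed START_DEBIT path placed a newly forked task at min_vruntime plus one vruntime slice, so it could not immediately preempt already-running tasks. The replacement places entities at avg_vruntime(cfs_rq), the load-weighted average of the runnable entities' vruntimes, which the kernel tracks incrementally relative to min_vruntime. The stand-alone C sketch below illustrates the quantity avg_vruntime() computes; the struct, its fields, and the per-entity loop are invented for the example and stand in for the kernel's incremental bookkeeping, so this is an illustration of the concept, not the kernel implementation.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical runnable entity; field names only mirror the kernel's. */
struct entity {
	int64_t vruntime;      /* virtual runtime of this entity */
	unsigned long weight;  /* load weight (priority) */
};

/*
 * Load-weighted average vruntime of the runnable entities, computed
 * relative to min_vruntime so the intermediate sums stay small.
 */
static int64_t avg_vruntime(const struct entity *es, int n, int64_t min_vruntime)
{
	int64_t sum = 0;
	long load = 0;

	for (int i = 0; i < n; i++) {
		/* key = vruntime - min_vruntime, weighted by load */
		sum  += (es[i].vruntime - min_vruntime) * (int64_t)es[i].weight;
		load += es[i].weight;
	}
	return load ? min_vruntime + sum / load : min_vruntime;
}

int main(void)
{
	struct entity rq[] = {
		{ .vruntime = 100, .weight = 1024 },  /* nice-0 task */
		{ .vruntime = 160, .weight = 335  },  /* lower-priority task */
	};
	/* a new task would be placed at this weighted average */
	printf("avg_vruntime = %lld\n",
	       (long long)avg_vruntime(rq, 2, 100));
	return 0;
}

Weighting by load means heavily loaded (high-priority) entities pull the placement point toward their own vruntime, so a new arrival neither starves them nor gets starved.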
kernel/sched/features.h

@@ -6,12 +6,6 @@
  */
 SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 
-/*
- * Place new tasks ahead so that they do not starve already running
- * tasks
- */
-SCHED_FEAT(START_DEBIT, true)
-
 /*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since its likely going to consume data we
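For reference, the removed sched_vslice() converted a task's wall-clock slice s into vruntime via calc_delta_fair(), i.e. vs = s/w where w is the entity's weight relative to a nice-0 task. A minimal stand-alone sketch of that conversion, using plain division in place of the kernel's fixed-point calc_delta_fair() and a simplified 1024 for the nice-0 weight:

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024UL  /* nice-0 load weight, simplified */

/*
 * vs = s / w: convert a wall-clock slice (ns) into vruntime.
 * Heavier entities accrue vruntime more slowly, so their vslice
 * is smaller for the same wall-clock slice.
 */
static uint64_t vslice(uint64_t slice_ns, unsigned long weight)
{
	return slice_ns * NICE_0_LOAD / weight;
}

int main(void)
{
	/* a 3 ms slice for a nice-0 task and a heavier task */
	printf("nice 0 : %llu ns\n", (unsigned long long)vslice(3000000, 1024));
	printf("heavy  : %llu ns\n", (unsigned long long)vslice(3000000, 2048));
	return 0;
}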