@@ -5289,6 +5289,9 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
 
 static inline bool cfs_bandwidth_used(void);
 
+static void
+requeue_delayed_entity(struct sched_entity *se);
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -5922,8 +5925,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
-		if (se->on_rq)
+		if (se->on_rq) {
+			SCHED_WARN_ON(se->sched_delayed);
 			break;
+		}
 		enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
@@ -6773,6 +6778,22 @@ static int sched_idle_cpu(int cpu)
 }
 #endif
 
+static void
+requeue_delayed_entity(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	/*
+	 * se->sched_delayed should imply: se->on_rq == 1.
+	 * Because a delayed entity is one that is still on
+	 * the runqueue competing until eligibility.
+	 */
+	SCHED_WARN_ON(!se->sched_delayed);
+	SCHED_WARN_ON(!se->on_rq);
+
+	se->sched_delayed = 0;
+}
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -6787,6 +6808,11 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 
+	if (flags & ENQUEUE_DELAYED) {
+		requeue_delayed_entity(se);
+		return;
+	}
+
 	/*
	 * The code below (indirectly) updates schedutil which looks at
	 * the cfs_rq utilization to select a frequency.
@@ -6804,8 +6830,11 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
 	for_each_sched_entity(se) {
-		if (se->on_rq)
+		if (se->on_rq) {
+			if (se->sched_delayed)
+				requeue_delayed_entity(se);
 			break;
+		}
 		cfs_rq = cfs_rq_of(se);
 		enqueue_entity(cfs_rq, se, flags);
 
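
Taken together, these hunks add a fast path: waking a task whose dequeue was deferred only needs to clear se->sched_delayed, because the entity never left the runqueue. As a rough aid, here is a self-contained userspace sketch of that control flow. This is not kernel code: the struct layout, the flag values, the assert() stand-ins for SCHED_WARN_ON(), and the collapse of the for_each_sched_entity() hierarchy walk into a single entity are all invented for illustration; only the function names and the branch structure mirror the patch.

/* Toy model of the delayed-requeue fast path (illustration only). */
#include <assert.h>
#include <stdio.h>

#define ENQUEUE_WAKEUP  0x01   /* flag values assumed for the sketch */
#define ENQUEUE_DELAYED 0x02

struct sched_entity {
	int on_rq;          /* entity is on the runqueue */
	int sched_delayed;  /* dequeue was deferred; still competing */
};

/* Mirrors the patch: a delayed entity must still be on_rq, so
 * requeueing it only clears the flag; no re-insertion is needed. */
static void requeue_delayed_entity(struct sched_entity *se)
{
	assert(se->sched_delayed);  /* stand-in for SCHED_WARN_ON() */
	assert(se->on_rq);
	se->sched_delayed = 0;
}

static void enqueue_task_fair(struct sched_entity *se, int flags)
{
	if (flags & ENQUEUE_DELAYED) {
		/* Fast path added by the patch: nothing to re-insert. */
		requeue_delayed_entity(se);
		return;
	}
	if (se->on_rq) {
		if (se->sched_delayed)
			requeue_delayed_entity(se);
		return;          /* corresponds to the in-loop `break` */
	}
	se->on_rq = 1;           /* stand-in for enqueue_entity() */
}

int main(void)
{
	struct sched_entity se = { .on_rq = 1, .sched_delayed = 1 };

	/* Waking a delayed task just cancels the pending dequeue. */
	enqueue_task_fair(&se, ENQUEUE_WAKEUP | ENQUEUE_DELAYED);
	printf("on_rq=%d sched_delayed=%d\n", se.on_rq, se.sched_delayed);
	return 0;
}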