@@ -1981,12 +1981,18 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_task(rq, p, flags);
 }
 
-/*
- * __normal_prio - return the priority that is based on the static prio
- */
-static inline int __normal_prio(struct task_struct *p)
+static inline int __normal_prio(int policy, int rt_prio, int nice)
 {
-	return p->static_prio;
+	int prio;
+
+	if (dl_policy(policy))
+		prio = MAX_DL_PRIO - 1;
+	else if (rt_policy(policy))
+		prio = MAX_RT_PRIO - 1 - rt_prio;
+	else
+		prio = NICE_TO_PRIO(nice);
+
+	return prio;
 }
 
 /*
@@ -1998,15 +2004,7 @@ static inline int __normal_prio(struct task_struct *p)
  */
 static inline int normal_prio(struct task_struct *p)
 {
-	int prio;
-
-	if (task_has_dl_policy(p))
-		prio = MAX_DL_PRIO - 1;
-	else if (task_has_rt_policy(p))
-		prio = MAX_RT_PRIO - 1 - p->rt_priority;
-	else
-		prio = __normal_prio(p);
-	return prio;
+	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
 }
 
 /*
@@ -4099,7 +4097,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
 			p->static_prio = NICE_TO_PRIO(0);
 
-		p->prio = p->normal_prio = __normal_prio(p);
+		p->prio = p->normal_prio = p->static_prio;
 		set_load_weight(p, false);
 
 		/*
@@ -6341,6 +6339,18 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
 }
 EXPORT_SYMBOL(default_wake_function);
 
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
+	if (dl_prio(prio))
+		p->sched_class = &dl_sched_class;
+	else if (rt_prio(prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
+
+	p->prio = prio;
+}
+
 #ifdef CONFIG_RT_MUTEXES
 
 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
@@ -6456,22 +6466,19 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 		} else {
 			p->dl.pi_se = &p->dl;
 		}
-		p->sched_class = &dl_sched_class;
 	} else if (rt_prio(prio)) {
 		if (dl_prio(oldprio))
 			p->dl.pi_se = &p->dl;
 		if (oldprio < prio)
 			queue_flag |= ENQUEUE_HEAD;
-		p->sched_class = &rt_sched_class;
 	} else {
 		if (dl_prio(oldprio))
 			p->dl.pi_se = &p->dl;
 		if (rt_prio(oldprio))
 			p->rt.timeout = 0;
-		p->sched_class = &fair_sched_class;
 	}
 
-	p->prio = prio;
+	__setscheduler_prio(p, prio);
 
 	if (queued)
 		enqueue_task(rq, p, queue_flag);
@@ -6824,35 +6831,6 @@ static void __setscheduler_params(struct task_struct *p,
 	set_load_weight(p, true);
 }
 
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
-			   const struct sched_attr *attr, bool keep_boost)
-{
-	/*
-	 * If params can't change scheduling class changes aren't allowed
-	 * either.
-	 */
-	if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)
-		return;
-
-	__setscheduler_params(p, attr);
-
-	/*
-	 * Keep a potential priority boosting if called from
-	 * sched_setscheduler().
-	 */
-	p->prio = normal_prio(p);
-	if (keep_boost)
-		p->prio = rt_effective_prio(p, p->prio);
-
-	if (dl_prio(p->prio))
-		p->sched_class = &dl_sched_class;
-	else if (rt_prio(p->prio))
-		p->sched_class = &rt_sched_class;
-	else
-		p->sched_class = &fair_sched_class;
-}
-
 /*
  * Check the target process has a UID that matches the current process's:
  */
@@ -6873,10 +6851,8 @@ static int __sched_setscheduler(struct task_struct *p,
 				const struct sched_attr *attr,
 				bool user, bool pi)
 {
-	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
-		      MAX_RT_PRIO - 1 - attr->sched_priority;
-	int retval, oldprio, oldpolicy = -1, queued, running;
-	int new_effective_prio, policy = attr->sched_policy;
+	int oldpolicy = -1, policy = attr->sched_policy;
+	int retval, oldprio, newprio, queued, running;
 	const struct sched_class *prev_class;
 	struct callback_head *head;
 	struct rq_flags rf;
@@ -7074,6 +7050,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	p->sched_reset_on_fork = reset_on_fork;
 	oldprio = p->prio;
 
+	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
 	if (pi) {
 		/*
 		 * Take priority boosted tasks into account. If the new
@@ -7082,8 +7059,8 @@ static int __sched_setscheduler(struct task_struct *p,
 		 * the runqueue. This will be done when the task deboost
 		 * itself.
 		 */
-		new_effective_prio = rt_effective_prio(p, newprio);
-		if (new_effective_prio == oldprio)
+		newprio = rt_effective_prio(p, newprio);
+		if (newprio == oldprio)
 			queue_flags &= ~DEQUEUE_MOVE;
 	}
 
@@ -7096,7 +7073,10 @@ static int __sched_setscheduler(struct task_struct *p,
 
 	prev_class = p->sched_class;
 
-	__setscheduler(rq, p, attr, pi);
+	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+		__setscheduler_params(p, attr);
+		__setscheduler_prio(p, newprio);
+	}
 	__setscheduler_uclamp(p, attr);
 
 	if (queued) {