@@ -2266,11 +2266,20 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef CONFIG_UCLAMP_TASK
-static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
+unsigned int uclamp_eff_value(struct task_struct *p, unsigned int clamp_id);
+
+static __always_inline
+unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
+			      struct task_struct *p)
 {
 	unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
 	unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
 
+	if (p) {
+		min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
+		max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
+	}
+
 	/*
 	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
 	 * RUNNABLE tasks with _different_ clamps, we can end up with an
@@ -2281,7 +2290,17 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
 
 	return clamp(util, min_util, max_util);
 }
+
+static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
+{
+	return uclamp_util_with(rq, util, NULL);
+}
 #else /* CONFIG_UCLAMP_TASK */
+static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
+					    struct task_struct *p)
+{
+	return util;
+}
 static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
 {
 	return util;
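
For reference, a minimal userspace sketch of the clamping semantics this diff introduces (demo_uclamp_util_with, struct clamp, clamp_val and max_u are illustrative stand-ins, not kernel code): the task's effective clamps are MAX-aggregated on top of the rq-wide clamps, so passing a task can only raise min_util or max_util relative to the CPU-wide values before the utilization is constrained to [min_util, max_util].

#include <stdio.h>

/* Stand-in for the rq-wide or per-task {min,max} clamp pair. */
struct clamp { unsigned int min, max; };

static unsigned int clamp_val(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Mirrors uclamp_util_with(): the task's effective clamps are
 * MAX-aggregated with the rq-wide clamps, then util is constrained
 * to the resulting [min_util, max_util] range. */
static unsigned int demo_uclamp_util_with(struct clamp rq, unsigned int util,
					  const struct clamp *p)
{
	unsigned int min_util = rq.min;
	unsigned int max_util = rq.max;

	if (p) {
		min_util = max_u(min_util, p->min);
		max_util = max_u(max_util, p->max);
	}

	return clamp_val(util, min_util, max_util);
}

int main(void)
{
	struct clamp rq   = { .min = 128, .max = 512  };
	struct clamp task = { .min = 300, .max = 1024 };

	/* uclamp_util() path (p == NULL): util 100 is boosted to 128. */
	printf("%u\n", demo_uclamp_util_with(rq, 100, NULL));

	/* With the task's clamps: the MIN clamp is raised to 300. */
	printf("%u\n", demo_uclamp_util_with(rq, 100, &task));
	return 0;
}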