@@ -5765,8 +5765,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 	return affine;
 }
 
-static inline int task_util(struct task_struct *p);
-static int cpu_util_wake(int cpu, struct task_struct *p);
+static inline unsigned long task_util(struct task_struct *p);
+static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
 
 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
 {
@@ -6247,15 +6247,15 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
  * capacity_orig) as it useful for predicting the capacity required after task
  * migrations (scheduler-driven DVFS).
  */
-static int cpu_util(int cpu)
+static unsigned long cpu_util(int cpu)
 {
 	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
 	unsigned long capacity = capacity_orig_of(cpu);
 
 	return (util >= capacity) ? capacity : util;
 }
 
-static inline int task_util(struct task_struct *p)
+static inline unsigned long task_util(struct task_struct *p)
 {
 	return p->se.avg.util_avg;
 }
@@ -6264,7 +6264,7 @@ static inline int task_util(struct task_struct *p)
  * cpu_util_wake: Compute cpu utilization with any contributions from
  * the waking task p removed.
  */
-static int cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
 {
 	unsigned long util, capacity;