@@ -5363,6 +5363,10 @@ static int wake_wide(struct task_struct *p)
  *
  * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
  *			  will be) idle.
+ *
+ * wake_affine_weight() - considers the weight to reflect the average
+ *			  scheduling latency of the CPUs. This seems to work
+ *			  for the overloaded case.
  */

 static bool
@@ -5378,6 +5382,40 @@ wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
 	return false;
 }

+static bool
+wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
+		   int this_cpu, int prev_cpu, int sync)
+{
+	s64 this_eff_load, prev_eff_load;
+	unsigned long task_load;
+
+	this_eff_load = target_load(this_cpu, sd->wake_idx);
+	prev_eff_load = source_load(prev_cpu, sd->wake_idx);
+
+	if (sync) {
+		unsigned long current_load = task_h_load(current);
+
+		if (current_load > this_eff_load)
+			return true;
+
+		this_eff_load -= current_load;
+	}
+
+	task_load = task_h_load(p);
+
+	this_eff_load += task_load;
+	if (sched_feat(WA_BIAS))
+		this_eff_load *= 100;
+	this_eff_load *= capacity_of(prev_cpu);
+
+	prev_eff_load -= task_load;
+	if (sched_feat(WA_BIAS))
+		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
+	prev_eff_load *= capacity_of(this_cpu);
+
+	return this_eff_load <= prev_eff_load;
+}
+
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 		       int prev_cpu, int sync)
 {
@@ -5387,6 +5425,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 	if (sched_feat(WA_IDLE) && !affine)
 		affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync);

+	if (sched_feat(WA_WEIGHT) && !affine)
+		affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
+
 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
 	if (affine) {
 		schedstat_inc(sd->ttwu_move_affine);
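
In short, the new wake_affine_weight() helper decides whether to pull the wakee to the waking CPU by comparing capacity-scaled effective loads: the waking CPU's load plus the task against the previous CPU's load minus the task, with the previous CPU given a bias of half the domain's imbalance_pct when WA_BIAS is set, and with the waker's own load discounted from the waking CPU on sync wakeups. The standalone C sketch below reproduces just that comparison under stated assumptions: the helper name would_pull_to_waker(), the load and capacity numbers, and the imbalance_pct of 117 are illustrative, not kernel code, and the sync-wakeup discount is omitted.

/*
 * Minimal userspace sketch of the cross-multiplied comparison done by
 * wake_affine_weight(); all numbers are made up for illustration and
 * the helper name is hypothetical, not a kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the WA_BIAS scaling: favour prev_cpu by half of imbalance_pct. */
static bool would_pull_to_waker(int64_t this_load, int64_t prev_load,
				unsigned long task_load,
				unsigned long this_capacity,
				unsigned long prev_capacity,
				unsigned int imbalance_pct)
{
	int64_t this_eff_load = this_load + task_load;
	int64_t prev_eff_load = prev_load - task_load;

	/*
	 * Multiply each side by the other CPU's capacity, so the test is
	 * effectively this_eff_load / this_capacity vs.
	 * prev_eff_load / prev_capacity, without doing a division.
	 */
	this_eff_load *= 100;
	this_eff_load *= prev_capacity;

	prev_eff_load *= 100 + (imbalance_pct - 100) / 2;
	prev_eff_load *= this_capacity;

	return this_eff_load <= prev_eff_load;
}

int main(void)
{
	/*
	 * Example: waking CPU carries 300 units of load, previous CPU 600,
	 * the task itself contributes 100, both CPUs have capacity 1024,
	 * and the domain uses an imbalance_pct of 117.
	 */
	bool pull = would_pull_to_waker(300, 600, 100, 1024, 1024, 117);

	printf("pull to waking CPU: %s\n", pull ? "yes" : "no");
	return 0;
}

With these numbers the sketch prints "pull to waking CPU: yes", since 400 * 100 * 1024 <= 500 * 108 * 1024: even after taking on the task, the waking CPU ends up with less load per unit of capacity than the biased previous CPU, so the affine wakeup is chosen.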