@@ -2526,20 +2526,19 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
 			fp_toint(cpu->iowait_boost * 100));
 }
 
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-				     bool strict, bool fast_switch)
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
+				     u32 desired, bool fast_switch)
 {
 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
 
 	value &= ~HWP_MIN_PERF(~0L);
-	value |= HWP_MIN_PERF(target_pstate);
+	value |= HWP_MIN_PERF(min);
 
-	/*
-	 * The entire MSR needs to be updated in order to update the HWP min
-	 * field in it, so opportunistically update the max too if needed.
-	 */
 	value &= ~HWP_MAX_PERF(~0L);
-	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+	value |= HWP_MAX_PERF(max);
+
+	value &= ~HWP_DESIRED_PERF(~0L);
+	value |= HWP_DESIRED_PERF(desired);
 
 	if (value == prev)
 		return;
@@ -2569,11 +2568,15 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
 	int old_pstate = cpu->pstate.current_pstate;
 
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-	if (hwp_active)
-		intel_cpufreq_adjust_hwp(cpu, target_pstate,
-					 policy->strict_target, fast_switch);
-	else if (target_pstate != old_pstate)
+	if (hwp_active) {
+		int max_pstate = policy->strict_target ?
+					target_pstate : cpu->max_perf_ratio;
+
+		intel_cpufreq_adjust_hwp(cpu, target_pstate, max_pstate, 0,
+					 fast_switch);
+	} else if (target_pstate != old_pstate) {
 		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+	}
 
 	cpu->pstate.current_pstate = target_pstate;
 
@@ -2634,6 +2637,47 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	return target_pstate * cpu->pstate.scaling;
 }
 
+static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+				      unsigned long min_perf,
+				      unsigned long target_perf,
+				      unsigned long capacity)
+{
+	struct cpudata *cpu = all_cpu_data[cpunum];
+	int old_pstate = cpu->pstate.current_pstate;
+	int cap_pstate, min_pstate, max_pstate, target_pstate;
+
+	update_turbo_state();
+	cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
+					     cpu->pstate.turbo_pstate;
+
+	/* Optimization: Avoid unnecessary divisions. */
+
+	target_pstate = cap_pstate;
+	if (target_perf < capacity)
+		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
+
+	min_pstate = cap_pstate;
+	if (min_perf < capacity)
+		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
+
+	if (min_pstate < cpu->pstate.min_pstate)
+		min_pstate = cpu->pstate.min_pstate;
+
+	if (min_pstate < cpu->min_perf_ratio)
+		min_pstate = cpu->min_perf_ratio;
+
+	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+	if (max_pstate < min_pstate)
+		max_pstate = min_pstate;
+
+	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
+
+	intel_cpufreq_adjust_hwp(cpu, min_pstate, max_pstate, target_pstate, true);
+
+	cpu->pstate.current_pstate = target_pstate;
+	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+}
+
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	int max_state, turbo_max, min_freq, max_freq, ret;
@@ -3032,6 +3076,8 @@ static int __init intel_pstate_init(void)
 		intel_pstate.attr = hwp_cpufreq_attrs;
 		intel_cpufreq.attr = hwp_cpufreq_attrs;
 		intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+		intel_cpufreq.fast_switch = NULL;
+		intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
 		if (!default_driver)
 			default_driver = &intel_pstate;
 
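
The new ->adjust_perf() callback maps the scheduler-provided target_perf/capacity ratio onto the available P-state range and then clamps the result between the effective minimum and maximum P-states, as the intel_cpufreq_adjust_perf() hunk above shows. Below is a minimal standalone sketch of just that scaling step, assuming made-up P-state limits and with the kernel's DIV_ROUND_UP macro open-coded; it is an illustration, not driver code.

#include <stdio.h>

/* Map perf (0..capacity) onto 0..cap_pstate, rounding up like DIV_ROUND_UP(). */
static unsigned int scale_to_pstate(unsigned int cap_pstate,
				    unsigned long perf, unsigned long capacity)
{
	if (perf >= capacity)
		return cap_pstate;		/* full capacity -> capacity P-state */
	return (cap_pstate * perf + capacity - 1) / capacity;
}

int main(void)
{
	unsigned int cap_pstate = 40;		/* hypothetical turbo P-state */
	unsigned int min_pstate = 8;		/* hypothetical minimum P-state */
	unsigned long capacity = 1024;		/* scheduler capacity scale */

	for (unsigned long target_perf = 0; target_perf <= capacity; target_perf += 256) {
		unsigned int pstate = scale_to_pstate(cap_pstate, target_perf, capacity);

		if (pstate < min_pstate)	/* clamp to the lower limit */
			pstate = min_pstate;
		printf("target_perf=%4lu -> pstate=%u\n", target_perf, pstate);
	}
	return 0;
}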