Commit a365ab6
cpufreq: intel_pstate: Implement the ->adjust_perf() callback
Make intel_pstate expose the ->adjust_perf() callback when it operates
in the passive mode with HWP enabled, which causes the schedutil
governor to use that callback instead of ->fast_switch().

The minimum and target performance-level values passed by the governor
to ->adjust_perf() are converted to HWP.REQ.MIN and HWP.REQ.DESIRED,
respectively, which allows the processor to adjust its configuration
to maximize energy-efficiency while providing sufficient capacity.

Signed-off-by: Rafael J. Wysocki <[email protected]>
Acked-by: Srinivas Pandruvada <[email protected]>
Acked-by: Viresh Kumar <[email protected]>
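For context: the driver-side hook implemented here was introduced by the
parent commit (ee2cc42). As recalled from include/linux/cpufreq.h around
this kernel version (verify against your tree), the callback looks like:

	/*
	 * ->fast_switch() replacement for drivers that use an internal
	 * representation of performance levels and can pass hints other
	 * than the target performance level to the hardware.
	 */
	void	(*adjust_perf)(unsigned int cpu,
			       unsigned long min_perf,
			       unsigned long target_perf,
			       unsigned long capacity);

min_perf and target_perf are expressed on the same abstract scale as
capacity, which is what lets intel_cpufreq_adjust_perf() below map them
to P-states with a single multiply-and-divide per value.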
1 parent ee2cc42 commit a365ab6

File tree

1 file changed (+58, -12 lines)
drivers/cpufreq/intel_pstate.c

@@ -2526,20 +2526,19 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
 		fp_toint(cpu->iowait_boost * 100));
 }
 
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-				     bool strict, bool fast_switch)
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
+				     u32 desired, bool fast_switch)
 {
 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
 
 	value &= ~HWP_MIN_PERF(~0L);
-	value |= HWP_MIN_PERF(target_pstate);
+	value |= HWP_MIN_PERF(min);
 
-	/*
-	 * The entire MSR needs to be updated in order to update the HWP min
-	 * field in it, so opportunistically update the max too if needed.
-	 */
 	value &= ~HWP_MAX_PERF(~0L);
-	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+	value |= HWP_MAX_PERF(max);
+
+	value &= ~HWP_DESIRED_PERF(~0L);
+	value |= HWP_DESIRED_PERF(desired);
 
 	if (value == prev)
 		return;
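The deleted comment explained why the old code touched the max field at
all: the minimum, maximum, and desired levels are fields of one MSR, so
the whole register has to be rewritten together. With all three values
now passed in explicitly, the function rebuilds the cached value and
skips the write when nothing changed. A minimal userspace sketch of the
clear-then-set pattern, with the field layout mirroring the kernel's
macros in arch/x86/include/asm/msr-index.h (the numeric P-state values
are made up for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* bits 7:0 = minimum, 15:8 = maximum, 23:16 = desired performance */
	#define HWP_MIN_PERF(x)     ((uint64_t)((x) & 0xff))
	#define HWP_MAX_PERF(x)     (((uint64_t)((x) & 0xff)) << 8)
	#define HWP_DESIRED_PERF(x) (((uint64_t)((x) & 0xff)) << 16)

	int main(void)
	{
		uint64_t value = 0x80402010;	/* pretend cached HWP request */

		value &= ~HWP_MIN_PERF(~0L);	/* clear, then set each field */
		value |= HWP_MIN_PERF(12);
		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(40);
		value &= ~HWP_DESIRED_PERF(~0L);
		value |= HWP_DESIRED_PERF(25);

		printf("HWP request = %#llx\n", (unsigned long long)value);
		return 0;
	}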
@@ -2569,11 +2568,15 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
 	int old_pstate = cpu->pstate.current_pstate;
 
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-	if (hwp_active)
-		intel_cpufreq_adjust_hwp(cpu, target_pstate,
-					 policy->strict_target, fast_switch);
-	else if (target_pstate != old_pstate)
+	if (hwp_active) {
+		int max_pstate = policy->strict_target ?
+					target_pstate : cpu->max_perf_ratio;
+
+		intel_cpufreq_adjust_hwp(cpu, target_pstate, max_pstate, 0,
+					 fast_switch);
+	} else if (target_pstate != old_pstate) {
 		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+	}
 
 	cpu->pstate.current_pstate = target_pstate;

@@ -2634,6 +2637,47 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	return target_pstate * cpu->pstate.scaling;
 }
 
+static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+				      unsigned long min_perf,
+				      unsigned long target_perf,
+				      unsigned long capacity)
+{
+	struct cpudata *cpu = all_cpu_data[cpunum];
+	int old_pstate = cpu->pstate.current_pstate;
+	int cap_pstate, min_pstate, max_pstate, target_pstate;
+
+	update_turbo_state();
+	cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
+					     cpu->pstate.turbo_pstate;
+
+	/* Optimization: Avoid unnecessary divisions. */
+
+	target_pstate = cap_pstate;
+	if (target_perf < capacity)
+		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
+
+	min_pstate = cap_pstate;
+	if (min_perf < capacity)
+		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
+
+	if (min_pstate < cpu->pstate.min_pstate)
+		min_pstate = cpu->pstate.min_pstate;
+
+	if (min_pstate < cpu->min_perf_ratio)
+		min_pstate = cpu->min_perf_ratio;
+
+	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+	if (max_pstate < min_pstate)
+		max_pstate = min_pstate;
+
+	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
+
+	intel_cpufreq_adjust_hwp(cpu, min_pstate, max_pstate, target_pstate, true);
+
+	cpu->pstate.current_pstate = target_pstate;
+	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+}
+
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	int max_state, turbo_max, min_freq, max_freq, ret;
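Two things are worth noting about the new function. First, unlike
intel_cpufreq_update_pstate() above, which passes 0 as the desired level
(per Intel's HWP documentation, a zero Desired_Performance field leaves
the operating point to hardware autonomous selection within [min, max]),
this path supplies an explicit desired hint. Second, min_perf and
target_perf arrive on the capacity scale, so the conversion to P-states
is a ratio with round-up; the "avoid unnecessary divisions" comment
refers to the "< capacity" guards, which skip the division entirely when
a request is at or above full capacity. A worked example of the ratio
under hypothetical numbers (cap_pstate = 40, capacity = 1024):

	#include <stdio.h>

	/* Same rounding as the kernel's DIV_ROUND_UP() helper. */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		long cap_pstate = 40;	/* highest available P-state */
		long capacity = 1024;	/* scheduler capacity scale  */
		long min_perf = 256, target_perf = 600;

		/* 40 * 256 / 1024 = 10 exactly; 40 * 600 / 1024 = 23.44 -> 24 */
		long min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
		long target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);

		printf("min=%ld desired=%ld\n", min_pstate, target_pstate);
		return 0;
	}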
@@ -3032,6 +3076,8 @@ static int __init intel_pstate_init(void)
 		intel_pstate.attr = hwp_cpufreq_attrs;
 		intel_cpufreq.attr = hwp_cpufreq_attrs;
 		intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+		intel_cpufreq.fast_switch = NULL;
+		intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
 		if (!default_driver)
 			default_driver = &intel_pstate;
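With ->fast_switch cleared and ->adjust_perf set, schedutil takes the
new path. Roughly, the governor-side hook selection added by the parent
commit in kernel/sched/cpufreq_schedutil.c reads (paraphrased from
memory; check your tree for the exact code):

	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

So ->adjust_perf() is only used for single-CPU policies with fast
switching enabled; clearing ->fast_switch does not disable that
machinery, since the driver still sets policy->fast_switch_possible at
init time.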
