@@ -77,6 +77,7 @@ static inline bool has_target(void)
 static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
+static int cpufreq_exit_governor(struct cpufreq_policy *policy);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -429,6 +430,68 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 
+/*
+ * Fast frequency switching status count.  Positive means "enabled", negative
+ * means "disabled" and 0 means "not decided yet".
+ */
+static int cpufreq_fast_switch_count;
+static DEFINE_MUTEX(cpufreq_fast_switch_lock);
+
+static void cpufreq_list_transition_notifiers(void)
+{
+	struct notifier_block *nb;
+
+	pr_info("Registered transition notifiers:\n");
+
+	mutex_lock(&cpufreq_transition_notifier_list.mutex);
+
+	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
+		pr_info("%pF\n", nb->notifier_call);
+
+	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
+}
+
+/**
+ * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
+ * @policy: cpufreq policy to enable fast frequency switching for.
+ *
+ * Try to enable fast frequency switching for @policy.
+ *
+ * The attempt will fail if there is at least one transition notifier registered
+ * at this point, as fast frequency switching is quite fundamentally at odds
+ * with transition notifiers.  Thus if successful, it will make registration of
+ * transition notifiers fail going forward.
+ */
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
+{
+	lockdep_assert_held(&policy->rwsem);
+
+	if (!policy->fast_switch_possible)
+		return;
+
+	mutex_lock(&cpufreq_fast_switch_lock);
+	if (cpufreq_fast_switch_count >= 0) {
+		cpufreq_fast_switch_count++;
+		policy->fast_switch_enabled = true;
+	} else {
+		pr_warn("CPU%u: Fast frequency switching not enabled\n",
+			policy->cpu);
+		cpufreq_list_transition_notifiers();
+	}
+	mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
+
+static void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+{
+	mutex_lock(&cpufreq_fast_switch_lock);
+	if (policy->fast_switch_enabled) {
+		policy->fast_switch_enabled = false;
+		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
+			cpufreq_fast_switch_count--;
+	}
+	mutex_unlock(&cpufreq_fast_switch_lock);
+}
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
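
The enable path above only succeeds for drivers that declare themselves capable of fast switching. As a hedged illustration (not part of this patch, all foo_* names are hypothetical), a driver might wire that up roughly as follows:

#include <linux/cpufreq.h>

/*
 * Illustrative sketch, not part of the patch: a driver opts in to fast
 * switching by setting policy->fast_switch_possible in its ->init() and by
 * providing a ->fast_switch() callback that never sleeps.  All foo_* names
 * are hypothetical; foo_hw_set_freq() stands in for whatever non-sleeping
 * mechanism (e.g. a register write) the hardware offers.
 */
unsigned int foo_hw_set_freq(unsigned int cpu, unsigned int target_freq);

static unsigned int foo_cpufreq_fast_switch(struct cpufreq_policy *policy,
					    unsigned int target_freq)
{
	/* Expected to pick the lowest supported frequency >= target_freq. */
	return foo_hw_set_freq(policy->cpu, target_freq);
}

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Lets cpufreq_enable_fast_switch() succeed for this policy. */
	policy->fast_switch_possible = true;
	return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo-cpufreq",
	.init		= foo_cpufreq_init,
	.fast_switch	= foo_cpufreq_fast_switch,
	/* .verify, .get, .target_index, etc. omitted for brevity */
};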
@@ -1319,7 +1382,7 @@ static void cpufreq_offline(unsigned int cpu)
 
 	/* If cpu is last user of policy, free policy */
 	if (has_target()) {
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		ret = cpufreq_exit_governor(policy);
 		if (ret)
 			pr_err("%s: Failed to exit governor\n", __func__);
 	}
@@ -1447,8 +1510,12 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 
 	ret_freq = cpufreq_driver->get(policy->cpu);
 
-	/* Updating inactive policies is invalid, so avoid doing that. */
-	if (unlikely(policy_is_inactive(policy)))
+	/*
+	 * Updating inactive policies is invalid, so avoid doing that.  Also
+	 * if fast frequency switching is used with the given policy, the check
+	 * against policy->cur is pointless, so skip it in that case too.
+	 */
+	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
 		return ret_freq;
 
 	if (ret_freq && policy->cur &&
@@ -1672,8 +1739,18 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
+		mutex_lock(&cpufreq_fast_switch_lock);
+
+		if (cpufreq_fast_switch_count > 0) {
+			mutex_unlock(&cpufreq_fast_switch_lock);
+			return -EBUSY;
+		}
 		ret = srcu_notifier_chain_register(
 				&cpufreq_transition_notifier_list, nb);
+		if (!ret)
+			cpufreq_fast_switch_count--;
+
+		mutex_unlock(&cpufreq_fast_switch_lock);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
 		ret = blocking_notifier_chain_register(
@@ -1706,8 +1783,14 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
+		mutex_lock(&cpufreq_fast_switch_lock);
+
 		ret = srcu_notifier_chain_unregister(
 				&cpufreq_transition_notifier_list, nb);
+		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
+			cpufreq_fast_switch_count++;
+
+		mutex_unlock(&cpufreq_fast_switch_lock);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
 		ret = blocking_notifier_chain_unregister(
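
A consequence of the register/unregister changes above is a new failure mode for transition-notifier users: once fast switching has been enabled for any policy, registration returns -EBUSY. A hedged sketch of the caller side (not part of this patch, all foo_* names are hypothetical):

#include <linux/cpufreq.h>
#include <linux/module.h>

/* Illustrative transition notifier that must now tolerate -EBUSY. */
static int foo_transition_cb(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (event == CPUFREQ_POSTCHANGE)
		pr_debug("transition: %u kHz -> %u kHz\n", freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block foo_transition_nb = {
	.notifier_call = foo_transition_cb,
};

static int __init foo_notifier_init(void)
{
	int ret = cpufreq_register_notifier(&foo_transition_nb,
					    CPUFREQ_TRANSITION_NOTIFIER);

	/* Fast switching is active on some policy; notifications unavailable. */
	if (ret == -EBUSY)
		pr_info("foo: transition notifications not available\n");

	return ret;
}
module_init(foo_notifier_init);

MODULE_LICENSE("GPL");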
@@ -1726,6 +1809,37 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *                              GOVERNORS                            *
  *********************************************************************/
 
+/**
+ * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
+ * @policy: cpufreq policy to switch the frequency for.
+ * @target_freq: New frequency to set (may be approximate).
+ *
+ * Carry out a fast frequency switch without sleeping.
+ *
+ * The driver's ->fast_switch() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select the minimum available frequency greater than or
+ * equal to @target_freq (CPUFREQ_RELATION_L).
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same policy and that it will never be called in
+ * parallel with either ->target() or ->target_index() for the same policy.
+ *
+ * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
+ * callback to indicate an error condition, the hardware configuration must be
+ * preserved.
+ */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+					unsigned int target_freq)
+{
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+	return cpufreq_driver->fast_switch(policy, target_freq);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+
 /* Must set freqs->new to intermediate frequency */
 static int __target_intermediate(struct cpufreq_policy *policy,
 				 struct cpufreq_freqs *freqs, int index)
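
To show how a governor might consume this API, here is a hedged sketch (not part of the patch, hypothetical foo_* name) of a non-sleeping update hook that respects the constraints documented above; updating policy->cur afterwards is one plausible bookkeeping choice, not something this patch mandates.

#include <linux/cpufreq.h>

/*
 * Illustrative sketch, not part of the patch: a governor hook running in
 * scheduler context uses the fast path only when fast_switch_enabled is set,
 * serializes invocations per policy itself, and treats CPUFREQ_ENTRY_INVALID
 * as "hardware state unchanged".
 */
static void foo_gov_fast_update(struct cpufreq_policy *policy,
				unsigned int next_freq)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;	/* must fall back to the sleeping ->target() path */

	freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (freq == CPUFREQ_ENTRY_INVALID)
		return;	/* the driver left the hardware configuration alone */

	policy->cur = freq;
}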
@@ -1946,6 +2060,12 @@ static int cpufreq_start_governor(struct cpufreq_policy *policy)
 	return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
+static int cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+	cpufreq_disable_fast_switch(policy);
+	return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+}
+
 int cpufreq_register_governor(struct cpufreq_governor *governor)
 {
 	int err;
@@ -2101,7 +2221,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 			return ret;
 		}
 
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		ret = cpufreq_exit_governor(policy);
 		if (ret) {
 			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
 			       __func__, old_gov->name, ret);
@@ -2118,7 +2238,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 			pr_debug("cpufreq: governor change\n");
 			return 0;
 		}
-		cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		cpufreq_exit_governor(policy);
 	}
 
 	/* new governor failed, so re-start old one */