@@ -5102,35 +5102,31 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)

static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
-	uint32_t rp_state_cap;
-	u32 ddcc_status = 0;
-	int ret;
-
	/* All of these values are in units of 50MHz */
-	dev_priv->rps.cur_freq = 0;
+
	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_BROXTON(dev_priv)) {
-		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
-		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
		dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	}
-
	/* hw_max = RP0 until we check for overclocking */
-	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;
+	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		ret = sandybridge_pcode_read(dev_priv,
-					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
-					&ddcc_status);
-		if (0 == ret)
+		u32 ddcc_status = 0;
+
+		if (sandybridge_pcode_read(dev_priv,
+					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+					   &ddcc_status) == 0)
			dev_priv->rps.efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
@@ -5140,30 +5136,14 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* Store the frequency values in 16.66 MHZ units, which is
-		   the natural hardware unit for SKL */
+		 * the natural hardware unit for SKL
+		 */
		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
	}
-
-	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0) {
-		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			dev_priv->rps.min_freq_softlimit =
-				max_t(int, dev_priv->rps.efficient_freq,
-				      intel_freq_opcode(dev_priv, 450));
-		else
-			dev_priv->rps.min_freq_softlimit =
-				dev_priv->rps.min_freq;
-	}
}

static void reset_rps(struct drm_i915_private *dev_priv,
@@ -5183,8 +5163,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

-	gen6_init_rps_frequencies(dev_priv);
-
	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		/*
@@ -5301,9 +5279,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

-	/* Initialize rps frequencies */
-	gen6_init_rps_frequencies(dev_priv);
-
	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
@@ -5392,9 +5367,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

-	/* Initialize rps frequencies */
-	gen6_init_rps_frequencies(dev_priv);
-
	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

@@ -5778,8 +5750,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)

	vlv_init_gpll_ref_freq(dev_priv);

-	mutex_lock(&dev_priv->rps.hw_lock);
-
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
@@ -5815,18 +5785,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);
-
-	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
-	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
@@ -5837,8 +5795,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)

	vlv_init_gpll_ref_freq(dev_priv);

-	mutex_lock(&dev_priv->rps.hw_lock);
-
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);
@@ -5880,18 +5836,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");
-
-	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
-
-	/* Preserve min/max settings in case of re-init */
-	if (dev_priv->rps.max_freq_softlimit == 0)
-		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
-	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -6559,10 +6503,30 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
		intel_runtime_pm_get(dev_priv);
	}

+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
+	else
+		gen6_init_rps_frequencies(dev_priv);
+
+	/* Derive initial user preferences/limits from the hardware limits */
+	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
+
+	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		dev_priv->rps.min_freq_softlimit =
+			max_t(int,
+			      dev_priv->rps.efficient_freq,
+			      intel_freq_opcode(dev_priv, 450));
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
}

void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
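For readers skimming the final hunk: below is a minimal standalone sketch of the softlimit derivation it consolidates into intel_init_gt_powersave(), i.e. reset both softlimits to the hardware limits and, on Haswell/Broadwell only, raise the floor to the larger of the efficient frequency and 450 MHz. The rps_limits struct, the freq_opcode() helper and the example values are simplified stand-ins, not the driver's actual definitions; the "preserve in case of re-init" guards can be dropped here because this path now runs from the one-time init rather than from every RPS enable.

/*
 * Toy sketch (userspace C, not kernel code) of the softlimit setup.
 * Frequencies are in 50 MHz units, mirroring the gen6+ convention.
 */
#include <stdio.h>

struct rps_limits {
	unsigned int min_freq;		/* RPn, hardware minimum */
	unsigned int efficient_freq;	/* RPe, most efficient frequency */
	unsigned int max_freq;		/* RP0, hardware maximum */
	unsigned int min_freq_softlimit;
	unsigned int max_freq_softlimit;
};

/* Stand-in for intel_freq_opcode(): convert MHz to 50 MHz units. */
static unsigned int freq_opcode(unsigned int mhz)
{
	return mhz / 50;
}

int main(void)
{
	struct rps_limits rps = {
		.min_freq = 7,		/* 350 MHz */
		.efficient_freq = 13,	/* 650 MHz */
		.max_freq = 23,		/* 1150 MHz */
	};
	int is_hsw_or_bdw = 1;		/* pretend we are on Haswell/Broadwell */

	/* Reset the user-visible limits to the hardware limits... */
	rps.max_freq_softlimit = rps.max_freq;
	rps.min_freq_softlimit = rps.min_freq;

	/* ...except on HSW/BDW, where the floor is max(RPe, 450 MHz). */
	if (is_hsw_or_bdw) {
		unsigned int floor = freq_opcode(450);

		rps.min_freq_softlimit =
			rps.efficient_freq > floor ? rps.efficient_freq : floor;
	}

	printf("softlimits: min=%u max=%u (50 MHz units)\n",
	       rps.min_freq_softlimit, rps.max_freq_softlimit);
	return 0;
}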