@@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
+static void smu_power_profile_mode_get(struct smu_context *smu,
+				       enum PP_SMC_POWER_PROFILE profile_mode);
+static void smu_power_profile_mode_put(struct smu_context *smu,
+				       enum PP_SMC_POWER_PROFILE profile_mode);
 
 static int smu_sys_get_pp_feature_mask(void *handle,
 				       char *buf)
@@ -1259,35 +1263,19 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
 	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
 	atomic64_set(&smu->throttle_int_counter, 0);
 	smu->watermarks_bitmap = 0;
-	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
-	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 
 	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
 
-	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
-	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
-
 	if (smu->is_apu ||
 	    !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
-		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+		smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 	else
-		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
-
-	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
-	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
-	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
-	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
-	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
-	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
-	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
+		smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+	smu_power_profile_mode_get(smu, smu->power_profile_mode);
+
 	smu->display_config = &adev->pm.pm_display_cfg;
 
 	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -1340,6 +1328,11 @@ static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
 		return ret;
 	}
 
+	if (smu->custom_profile_params) {
+		kfree(smu->custom_profile_params);
+		smu->custom_profile_params = NULL;
+	}
+
 	smu_fini_microcode(smu);
 
 	return 0;
@@ -2124,6 +2117,9 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block)
 	if (!ret)
 		adev->gfx.gfx_off_entrycount = count;
 
+	/* clear this on suspend so it will get reprogrammed on resume */
+	smu->workload_mask = 0;
+
 	return 0;
 }
 
@@ -2236,25 +2232,49 @@ static int smu_enable_umd_pstate(void *handle,
 }
 
 static int smu_bump_power_profile_mode(struct smu_context *smu,
-					   long *param,
-					   uint32_t param_size)
+				       long *custom_params,
+				       u32 custom_params_max_idx)
 {
-	int ret = 0;
+	u32 workload_mask = 0;
+	int i, ret = 0;
+
+	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
+		if (smu->workload_refcount[i])
+			workload_mask |= 1 << i;
+	}
+
+	if (smu->workload_mask == workload_mask)
+		return 0;
 
 	if (smu->ppt_funcs->set_power_profile_mode)
-		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+		ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
+							     custom_params,
+							     custom_params_max_idx);
+
+	if (!ret)
+		smu->workload_mask = workload_mask;
 
 	return ret;
 }
 
+static void smu_power_profile_mode_get(struct smu_context *smu,
+				       enum PP_SMC_POWER_PROFILE profile_mode)
+{
+	smu->workload_refcount[profile_mode]++;
+}
+
+static void smu_power_profile_mode_put(struct smu_context *smu,
+				       enum PP_SMC_POWER_PROFILE profile_mode)
+{
+	if (smu->workload_refcount[profile_mode])
+		smu->workload_refcount[profile_mode]--;
+}
+
 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 					  enum amd_dpm_forced_level level,
-					  bool skip_display_settings,
-					  bool init)
+					  bool skip_display_settings)
 {
 	int ret = 0;
-	int index = 0;
-	long workload[1];
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 
 	if (!skip_display_settings) {
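
To make the hunk above easier to follow, here is a minimal standalone sketch (not driver code) of the refcount-to-bitmask scheme that smu_bump_power_profile_mode() and the get/put helpers implement: PROFILE_COUNT, profile_get(), profile_put(), bump() and the printf "backend" are simplified stand-ins, not amdgpu symbols. Each profile keeps a reference count, the workload mask is rebuilt from the non-zero counts, and the backend is reprogrammed only when the mask actually changes.

```c
/* Standalone illustration of refcount-based workload-mask aggregation. */
#include <stdio.h>

#define PROFILE_COUNT 8

static unsigned int refcount[PROFILE_COUNT];
static unsigned int current_mask;

static void profile_get(int profile) { refcount[profile]++; }

static void profile_put(int profile)
{
	if (refcount[profile])
		refcount[profile]--;
}

static void bump(void)
{
	unsigned int mask = 0;
	int i;

	/* rebuild the mask from every profile that still has users */
	for (i = 0; i < PROFILE_COUNT; i++)
		if (refcount[i])
			mask |= 1u << i;

	if (mask == current_mask)	/* nothing changed, skip reprogramming */
		return;

	printf("program backend with mask 0x%x\n", mask);
	current_mask = mask;
}

int main(void)
{
	profile_get(1);		/* e.g. fullscreen 3D requested */
	bump();			/* programs 0x2 */
	profile_get(5);		/* e.g. compute also requested */
	bump();			/* programs 0x22 */
	profile_put(1);
	bump();			/* programs 0x20 */
	bump();			/* no change, skipped */
	return 0;
}
```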
@@ -2291,14 +2311,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 	}
 
 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
-	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
-		index = fls(smu->workload_mask);
-		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload[0] = smu->workload_setting[index];
-
-		if (init || smu->power_profile_mode != workload[0])
-			smu_bump_power_profile_mode(smu, workload, 0);
-	}
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+		smu_bump_power_profile_mode(smu, NULL, 0);
 
 	return ret;
 }
@@ -2317,13 +2331,13 @@ static int smu_handle_task(struct smu_context *smu,
 		ret = smu_pre_display_config_changed(smu);
 		if (ret)
 			return ret;
-		ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+		ret = smu_adjust_power_state_dynamic(smu, level, false);
 		break;
 	case AMD_PP_TASK_COMPLETE_INIT:
-		ret = smu_adjust_power_state_dynamic(smu, level, true, true);
+		ret = smu_adjust_power_state_dynamic(smu, level, true);
 		break;
 	case AMD_PP_TASK_READJUST_POWER_STATE:
-		ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+		ret = smu_adjust_power_state_dynamic(smu, level, true);
 		break;
 	default:
 		break;
@@ -2345,34 +2359,33 @@ static int smu_handle_dpm_task(void *handle,
 
 static int smu_switch_power_profile(void *handle,
 				    enum PP_SMC_POWER_PROFILE type,
-				    bool en)
+				    bool enable)
 {
 	struct smu_context *smu = handle;
 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-	long workload[1];
-	uint32_t index;
+	int ret;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
 		return -EOPNOTSUPP;
 
 	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
 		return -EINVAL;
 
-	if (!en) {
-		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
-		index = fls(smu->workload_mask);
-		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload[0] = smu->workload_setting[index];
-	} else {
-		smu->workload_mask |= (1 << smu->workload_prority[type]);
-		index = fls(smu->workload_mask);
-		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-		workload[0] = smu->workload_setting[index];
-	}
-
 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
-	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
-		smu_bump_power_profile_mode(smu, workload, 0);
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+		if (enable)
+			smu_power_profile_mode_get(smu, type);
+		else
+			smu_power_profile_mode_put(smu, type);
+		ret = smu_bump_power_profile_mode(smu, NULL, 0);
+		if (ret) {
+			if (enable)
+				smu_power_profile_mode_put(smu, type);
+			else
+				smu_power_profile_mode_get(smu, type);
+			return ret;
+		}
+	}
 
 	return 0;
 }
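
A similarly hedged sketch of the acquire/commit/rollback pattern that the reworked smu_switch_power_profile() above follows: take or drop a reference first, try to commit the new mask, and revert the refcount change on failure so get/put stay balanced. This is standalone illustration, not driver code; commit_mask() is a hypothetical placeholder for smu_bump_power_profile_mode(), here made to fail every third call just to exercise the rollback path.

```c
/* Standalone illustration of the refcount rollback-on-error pattern. */
#include <errno.h>
#include <stdio.h>

#define PROFILE_COUNT 8

static unsigned int refcount[PROFILE_COUNT];

static void profile_get(int profile) { refcount[profile]++; }

static void profile_put(int profile)
{
	if (refcount[profile])
		refcount[profile]--;
}

/* hypothetical backend commit; pretend every third request is rejected */
static int commit_mask(void)
{
	static int calls;

	return (++calls % 3 == 0) ? -EBUSY : 0;
}

static int switch_profile(int profile, int enable)
{
	int ret;

	if (enable)
		profile_get(profile);
	else
		profile_put(profile);

	ret = commit_mask();
	if (ret) {
		/* roll back so refcounts still reflect the committed mask */
		if (enable)
			profile_put(profile);
		else
			profile_get(profile);
		return ret;
	}

	return 0;
}

int main(void)
{
	printf("enable: %d (refcount %u)\n", switch_profile(2, 1), refcount[2]);
	printf("enable: %d (refcount %u)\n", switch_profile(2, 1), refcount[2]);
	/* third call fails; the refcount stays at 2 */
	printf("enable: %d (refcount %u)\n", switch_profile(2, 1), refcount[2]);
	return 0;
}
```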
@@ -3064,12 +3077,35 @@ static int smu_set_power_profile_mode(void *handle,
 				      uint32_t param_size)
 {
 	struct smu_context *smu = handle;
+	bool custom = false;
+	int ret = 0;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
 	    !smu->ppt_funcs->set_power_profile_mode)
 		return -EOPNOTSUPP;
 
-	return smu_bump_power_profile_mode(smu, param, param_size);
+	if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
+		custom = true;
+		/* clear frontend mask so custom changes propagate */
+		smu->workload_mask = 0;
+	}
+
+	if ((param[param_size] != smu->power_profile_mode) || custom) {
+		/* clear the old user preference */
+		smu_power_profile_mode_put(smu, smu->power_profile_mode);
+		/* set the new user preference */
+		smu_power_profile_mode_get(smu, param[param_size]);
+		ret = smu_bump_power_profile_mode(smu,
+						  custom ? param : NULL,
+						  custom ? param_size : 0);
+		if (ret)
+			smu_power_profile_mode_put(smu, param[param_size]);
+		else
+			/* store the user's preference */
+			smu->power_profile_mode = param[param_size];
+	}
+
+	return ret;
 }
 
 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)