@@ -3097,6 +3097,7 @@ static int dm_resume(void *handle)
 
 		commit_params.streams = dc_state->streams;
 		commit_params.stream_count = dc_state->stream_count;
+		dc_exit_ips_for_hw_access(dm->dc);
 		WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
 
 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -3169,6 +3170,7 @@ static int dm_resume(void *handle)
 			emulated_link_detect(aconnector->dc_link);
 		} else {
 			mutex_lock(&dm->dc_lock);
+			dc_exit_ips_for_hw_access(dm->dc);
 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 			mutex_unlock(&dm->dc_lock);
 		}
@@ -3505,6 +3507,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+	struct dc *dc = aconnector->dc_link->ctx->dc;
 	bool ret = false;
 
 	if (adev->dm.disable_hpd_irq)
@@ -3539,6 +3542,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 			drm_kms_helper_connector_hotplug_event(connector);
 	} else {
 		mutex_lock(&adev->dm.dc_lock);
+		dc_exit_ips_for_hw_access(dc);
 		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 		mutex_unlock(&adev->dm.dc_lock);
 		if (ret) {
@@ -3598,6 +3602,7 @@ static void handle_hpd_rx_irq(void *param)
 	bool has_left_work = false;
 	int idx = dc_link->link_index;
 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+	struct dc *dc = aconnector->dc_link->ctx->dc;
 
 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 
@@ -3687,6 +3692,7 @@ static void handle_hpd_rx_irq(void *param)
 		bool ret = false;
 
 		mutex_lock(&adev->dm.dc_lock);
+		dc_exit_ips_for_hw_access(dc);
 		ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
 		mutex_unlock(&adev->dm.dc_lock);
 
@@ -4888,6 +4894,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 			bool ret = false;
 
 			mutex_lock(&dm->dc_lock);
+			dc_exit_ips_for_hw_access(dm->dc);
 			ret = dc_link_detect(link, DETECT_REASON_BOOT);
 			mutex_unlock(&dm->dc_lock);
 
@@ -9298,6 +9305,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 
 			memset(&position, 0, sizeof(position));
 			mutex_lock(&dm->dc_lock);
+			dc_exit_ips_for_hw_access(dm->dc);
 			dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
 			mutex_unlock(&dm->dc_lock);
 		}
@@ -9372,6 +9380,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 
 	dm_enable_per_frame_crtc_master_sync(dc_state);
 	mutex_lock(&dm->dc_lock);
+	dc_exit_ips_for_hw_access(dm->dc);
 	WARN_ON(!dc_commit_streams(dm->dc, &params));
 
 	/* Allow idle optimization when vblank count is 0 for display off */
@@ -9737,6 +9746,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
 
 		mutex_lock(&dm->dc_lock);
+		dc_exit_ips_for_hw_access(dm->dc);
 		dc_update_planes_and_stream(dm->dc,
 					    dummy_updates,
 					    status->plane_count,