@@ -162,7 +162,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	synchronize_irq(i915->drm.irq);
 
 	intel_engines_park(i915);
-	i915_gem_timelines_park(i915);
+	i915_timelines_park(i915);
 
 	i915_pmu_gt_parked(i915);
 
@@ -2977,8 +2977,8 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	 * extra delay for a recent interrupt is pointless. Hence, we do
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
 	 */
-	spin_lock_irqsave(&engine->timeline->lock, flags);
-	list_for_each_entry(request, &engine->timeline->requests, link) {
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+	list_for_each_entry(request, &engine->timeline.requests, link) {
 		if (__i915_request_completed(request, request->global_seqno))
 			continue;
 
@@ -2989,7 +2989,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 		active = request;
 		break;
 	}
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
 	return active;
 }
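
Note: the i915_gem_find_active_request() hunks above (and most of the hunks that follow) only switch engine->timeline accesses from -> to ., because the engine's timeline becomes an embedded member instead of a pointer. A minimal, simplified sketch of the assumed before/after layout (illustrative only, not the verbatim kernel definitions):

struct i915_timeline {				/* sketch only */
	spinlock_t lock;			/* guards the request list */
	struct list_head requests;		/* requests on this timeline */
};

/* Before: the engine pointed at a timeline owned elsewhere. */
struct intel_engine_cs_before {
	struct intel_timeline *timeline;	/* hence engine->timeline->lock */
};

/* After: the timeline is embedded in the engine itself. */
struct intel_engine_cs_after {
	struct i915_timeline timeline;		/* hence engine->timeline.lock */
};
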
@@ -3110,23 +3110,23 @@ static void engine_skip_context(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_context *hung_ctx = request->ctx;
-	struct intel_timeline *timeline = request->timeline;
+	struct i915_timeline *timeline = request->timeline;
 	unsigned long flags;
 
-	GEM_BUG_ON(timeline == engine->timeline);
+	GEM_BUG_ON(timeline == &engine->timeline);
 
-	spin_lock_irqsave(&engine->timeline->lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
 	spin_lock(&timeline->lock);
 
-	list_for_each_entry_continue(request, &engine->timeline->requests, link)
+	list_for_each_entry_continue(request, &engine->timeline.requests, link)
 		if (request->ctx == hung_ctx)
 			skip_request(request);
 
 	list_for_each_entry(request, &timeline->requests, link)
 		skip_request(request);
 
 	spin_unlock(&timeline->lock);
-	spin_unlock_irqrestore(&engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 /* Returns the request if it was guilty of the hang */
@@ -3183,11 +3183,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 		dma_fence_set_error(&request->fence, -EAGAIN);
 
 		/* Rewind the engine to replay the incomplete rq */
-		spin_lock_irq(&engine->timeline->lock);
+		spin_lock_irq(&engine->timeline.lock);
 		request = list_prev_entry(request, link);
-		if (&request->link == &engine->timeline->requests)
+		if (&request->link == &engine->timeline.requests)
 			request = NULL;
-		spin_unlock_irq(&engine->timeline->lock);
+		spin_unlock_irq(&engine->timeline.lock);
 	}
 }
 
@@ -3300,10 +3300,10 @@ static void nop_complete_submit_request(struct i915_request *request)
 		  request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);
 
-	spin_lock_irqsave(&request->engine->timeline->lock, flags);
+	spin_lock_irqsave(&request->engine->timeline.lock, flags);
 	__i915_request_submit(request);
 	intel_engine_init_global_seqno(request->engine, request->global_seqno);
-	spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
+	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
 }
 
 void i915_gem_set_wedged(struct drm_i915_private *i915)
@@ -3372,10 +3372,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		 * (lockless) lookup doesn't try and wait upon the request as we
 		 * reset it.
 		 */
-		spin_lock_irqsave(&engine->timeline->lock, flags);
+		spin_lock_irqsave(&engine->timeline.lock, flags);
 		intel_engine_init_global_seqno(engine,
 					       intel_engine_last_submit(engine));
-		spin_unlock_irqrestore(&engine->timeline->lock, flags);
+		spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
 		i915_gem_reset_finish_engine(engine);
 	}
@@ -3387,8 +3387,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 
 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
-	struct i915_gem_timeline *tl;
-	int i;
+	struct i915_timeline *tl;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
@@ -3407,29 +3406,27 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	 * No more can be submitted until we reset the wedged bit.
 	 */
 	list_for_each_entry(tl, &i915->gt.timelines, link) {
-		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
-			struct i915_request *rq;
+		struct i915_request *rq;
 
-			rq = i915_gem_active_peek(&tl->engine[i].last_request,
-						  &i915->drm.struct_mutex);
-			if (!rq)
-				continue;
+		rq = i915_gem_active_peek(&tl->last_request,
+					  &i915->drm.struct_mutex);
+		if (!rq)
+			continue;
 
-			/*
-			 * We can't use our normal waiter as we want to
-			 * avoid recursively trying to handle the current
-			 * reset. The basic dma_fence_default_wait() installs
-			 * a callback for dma_fence_signal(), which is
-			 * triggered by our nop handler (indirectly, the
-			 * callback enables the signaler thread which is
-			 * woken by the nop_submit_request() advancing the seqno
-			 * and when the seqno passes the fence, the signaler
-			 * then signals the fence waking us up).
-			 */
-			if (dma_fence_default_wait(&rq->fence, true,
-						   MAX_SCHEDULE_TIMEOUT) < 0)
-				return false;
-		}
+		/*
+		 * We can't use our normal waiter as we want to
+		 * avoid recursively trying to handle the current
+		 * reset. The basic dma_fence_default_wait() installs
+		 * a callback for dma_fence_signal(), which is
+		 * triggered by our nop handler (indirectly, the
+		 * callback enables the signaler thread which is
+		 * woken by the nop_submit_request() advancing the seqno
+		 * and when the seqno passes the fence, the signaler
+		 * then signals the fence waking us up).
+		 */
+		if (dma_fence_default_wait(&rq->fence, true,
+					   MAX_SCHEDULE_TIMEOUT) < 0)
+			return false;
 	}
 	i915_retire_requests(i915);
 	GEM_BUG_ON(i915->gt.active_requests);
@@ -3734,17 +3731,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	return ret;
 }
 
-static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
+static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
 {
-	int ret, i;
-
-	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
-		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
+	return i915_gem_active_wait(&tl->last_request, flags);
 }
 
 static int wait_for_engines(struct drm_i915_private *i915)
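
wait_for_timeline() shrinks from a per-engine loop to a single wait. A hedged sketch of the layout difference assumed by this hunk (type and field names simplified, not the verbatim kernel definitions): the old i915_gem_timeline fanned out into one sub-timeline per engine, each tracking its own last_request, while the new standalone i915_timeline tracks exactly one.

/* Simplified sketch only. */
struct intel_timeline_old {
	struct i915_gem_active last_request;
};

struct i915_gem_timeline_old {
	struct intel_timeline_old engine[I915_NUM_ENGINES];	/* required the loop */
};

struct i915_timeline_sketch {
	struct i915_gem_active last_request;	/* one i915_gem_active_wait() suffices */
};
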
@@ -3762,30 +3751,37 @@ static int wait_for_engines(struct drm_i915_private *i915)
 
 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
-	int ret;
-
 	/* If the device is asleep, we have no requests outstanding */
 	if (!READ_ONCE(i915->gt.awake))
 		return 0;
 
 	if (flags & I915_WAIT_LOCKED) {
-		struct i915_gem_timeline *tl;
+		struct i915_timeline *tl;
+		int err;
 
 		lockdep_assert_held(&i915->drm.struct_mutex);
 
 		list_for_each_entry(tl, &i915->gt.timelines, link) {
-			ret = wait_for_timeline(tl, flags);
-			if (ret)
-				return ret;
+			err = wait_for_timeline(tl, flags);
+			if (err)
+				return err;
 		}
 		i915_retire_requests(i915);
 
-		ret = wait_for_engines(i915);
+		return wait_for_engines(i915);
 	} else {
-		ret = wait_for_timeline(&i915->gt.execution_timeline, flags);
-	}
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
+		int err;
 
-	return ret;
+		for_each_engine(engine, i915, id) {
+			err = wait_for_timeline(&engine->timeline, flags);
+			if (err)
+				return err;
+		}
+
+		return 0;
+	}
 }
 
 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
@@ -4954,7 +4950,7 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 	enum intel_engine_id id;
 
 	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
+		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
 		GEM_BUG_ON(engine->last_retired_context != kernel_context);
 	}
 }
@@ -5603,12 +5599,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	err = i915_gem_timeline_init__global(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	if (err)
-		goto err_priorities;
-
 	i915_gem_init__mm(dev_priv);
 
 	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
@@ -5628,8 +5618,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 
 	return 0;
 
-err_priorities:
-	kmem_cache_destroy(dev_priv->priorities);
 err_dependencies:
 	kmem_cache_destroy(dev_priv->dependencies);
 err_requests:
@@ -5650,12 +5638,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
 	WARN_ON(dev_priv->mm.object_count);
-
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_gem_timeline_fini(&dev_priv->gt.legacy_timeline);
-	i915_gem_timeline_fini(&dev_priv->gt.execution_timeline);
 	WARN_ON(!list_empty(&dev_priv->gt.timelines));
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	kmem_cache_destroy(dev_priv->priorities);
 	kmem_cache_destroy(dev_priv->dependencies);
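
The early-init and cleanup hunks remove the global timeline bookkeeping: i915_gem_init_early() no longer creates a device-wide timeline under struct_mutex (so the err_priorities unwind path disappears), and i915_gem_cleanup_early() merely asserts that gt.timelines is empty. A hypothetical sketch of where the per-engine timelines are presumably set up instead; the constructor name and signature are assumptions, not taken from this diff:

/* Assumed helper; engine setup is expected to do something along these lines. */
static void sketch_setup_engine_timeline(struct intel_engine_cs *engine)
{
	/*
	 * Initialise the embedded timeline and add it to i915->gt.timelines,
	 * which is why cleanup above only needs the list_empty() check.
	 */
	i915_timeline_init(engine->i915, &engine->timeline, engine->name);
}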