@@ -286,6 +286,7 @@ static int reserve_gt(struct drm_i915_private *i915)
 
 static void unreserve_gt(struct drm_i915_private *i915)
 {
+	GEM_BUG_ON(!i915->gt.active_requests);
 	if (!--i915->gt.active_requests)
 		i915_gem_park(i915);
 }
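Aside, not part of the patch: the new GEM_BUG_ON() documents that unreserve_gt() must never be called with active_requests already at zero, since the unsigned pre-decrement would otherwise wrap. A minimal userspace sketch of the same park-on-last-request pattern, with hypothetical names standing in for the driver's:

```c
#include <assert.h>

/* Hypothetical stand-ins: active_requests is an unsigned count guarded
 * by a single lock, and the device parks when the last request retires.
 * Asserting non-zero *before* the pre-decrement catches an unbalanced
 * unreserve before the count wraps to UINT_MAX.
 */
struct gt { unsigned int active_requests; int parked; };

static void unreserve(struct gt *gt)
{
	assert(gt->active_requests);	/* the decrement below would wrap */
	if (!--gt->active_requests)
		gt->parked = 1;		/* stands in for i915_gem_park() */
}

int main(void)
{
	struct gt gt = { .active_requests = 1 };

	unreserve(&gt);
	assert(gt.parked);
	return 0;
}
```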
@@ -298,6 +299,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active,
 
 static void advance_ring(struct i915_request *request)
 {
+	struct intel_ring *ring = request->ring;
 	unsigned int tail;
 
 	/*
@@ -309,7 +311,8 @@ static void advance_ring(struct i915_request *request)
 	 * Note this requires that we are always called in request
	 * completion order.
 	 */
-	if (list_is_last(&request->ring_link, &request->ring->request_list)) {
+	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
+	if (list_is_last(&request->ring_link, &ring->request_list)) {
 		/*
 		 * We may race here with execlists resubmitting this request
 		 * as we retire it. The resubmission will move the ring->tail
@@ -322,9 +325,9 @@ static void advance_ring(struct i915_request *request)
 	} else {
 		tail = request->postfix;
 	}
-	list_del(&request->ring_link);
+	list_del_init(&request->ring_link);
 
-	request->ring->head = tail;
+	ring->head = tail;
 }
 
 static void free_capture_list(struct i915_request *request)
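Aside, not part of the patch: advance_ring() relies on requests being retired in completion order, so the request being retired is always at the head of ring->request_list. The new GEM_BUG_ON(!list_is_first(...)) asserts exactly that, and list_is_last() then means the request is the only one still outstanding on the ring. A self-contained sketch of that invariant using a minimal doubly linked list (hypothetical helpers mirroring the kernel's list.h semantics):

```c
#include <assert.h>

/* Requests join the tail of the ring's list and are retired strictly
 * from the head, so the request being retired must be first; being last
 * as well means it is the only request outstanding on the ring.
 */
struct node { struct node *prev, *next; };

static void list_init(struct node *head) { head->prev = head->next = head; }

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static int list_is_first(const struct node *n, const struct node *head)
{
	return n->prev == head;
}

static int list_is_last(const struct node *n, const struct node *head)
{
	return n->next == head;
}

int main(void)
{
	struct node head, a, b;

	list_init(&head);
	list_add_tail(&a, &head);
	list_add_tail(&b, &head);

	assert(list_is_first(&a, &head));	/* oldest request */
	assert(!list_is_last(&a, &head));	/* b is queued behind it */
	return 0;
}
```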
@@ -340,30 +343,84 @@ static void free_capture_list(struct i915_request *request)
 	}
 }
 
+static void __retire_engine_request(struct intel_engine_cs *engine,
+				    struct i915_request *rq)
+{
+	GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
+		  __func__, engine->name,
+		  rq->fence.context, rq->fence.seqno,
+		  rq->global_seqno,
+		  intel_engine_get_seqno(engine));
+
+	GEM_BUG_ON(!i915_request_completed(rq));
+
+	local_irq_disable();
+
+	spin_lock(&engine->timeline->lock);
+	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline->requests));
+	list_del_init(&rq->link);
+	spin_unlock(&engine->timeline->lock);
+
+	spin_lock(&rq->lock);
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+		dma_fence_signal_locked(&rq->fence);
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+		intel_engine_cancel_signaling(rq);
+	if (rq->waitboost) {
+		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
+		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+	}
+	spin_unlock(&rq->lock);
+
+	local_irq_enable();
+
+	/*
+	 * The backing object for the context is done after switching to the
+	 * *next* context. Therefore we cannot retire the previous context until
+	 * the next context has already started running. However, since we
+	 * cannot take the required locks at i915_request_submit() we
+	 * defer the unpinning of the active context to now, retirement of
+	 * the subsequent request.
+	 */
+	if (engine->last_retired_context)
+		intel_context_unpin(engine->last_retired_context, engine);
+	engine->last_retired_context = rq->ctx;
+}
+
+static void __retire_engine_upto(struct intel_engine_cs *engine,
+				 struct i915_request *rq)
+{
+	struct i915_request *tmp;
+
+	if (list_empty(&rq->link))
+		return;
+
+	do {
+		tmp = list_first_entry(&engine->timeline->requests,
+				       typeof(*tmp), link);
+
+		GEM_BUG_ON(tmp->engine != engine);
+		__retire_engine_request(engine, tmp);
+	} while (tmp != rq);
+}
+
 static void i915_request_retire(struct i915_request *request)
 {
-	struct intel_engine_cs *engine = request->engine;
 	struct i915_gem_active *active, *next;
 
 	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
-		  engine->name,
+		  request->engine->name,
 		  request->fence.context, request->fence.seqno,
 		  request->global_seqno,
-		  intel_engine_get_seqno(engine));
+		  intel_engine_get_seqno(request->engine));
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
 	GEM_BUG_ON(!i915_request_completed(request));
-	GEM_BUG_ON(!request->i915->gt.active_requests);
 
 	trace_i915_request_retire(request);
 
-	spin_lock_irq(&engine->timeline->lock);
-	list_del_init(&request->link);
-	spin_unlock_irq(&engine->timeline->lock);
-
 	advance_ring(request);
-
 	free_capture_list(request);
 
 	/*
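Aside, not part of the patch: since requests are now retired along their rings, the engine timeline can fall behind; __retire_engine_upto() replays engine-ordered retirement from the head of engine->timeline->requests up to and including the given request. A minimal sketch of that drain pattern, with hypothetical types rather than the driver's:

```c
#include <assert.h>
#include <stddef.h>

struct request { struct request *next; };
struct fifo { struct request *head; };

/* Stands in for __retire_engine_request(): pop the current head. */
static void retire_head(struct fifo *q)
{
	q->head = q->head->next;
}

/* The do/while drain used by __retire_engine_upto() and the reworked
 * i915_request_retire_upto(): retire oldest-first until the target
 * request itself has been retired. Correctness relies on rq being on
 * the list and on retirement always removing the current head.
 */
static void retire_upto(struct fifo *q, struct request *rq)
{
	struct request *tmp;

	do {
		tmp = q->head;
		retire_head(q);
	} while (tmp != rq);
}

int main(void)
{
	struct request c = { NULL }, b = { &c }, a = { &b };
	struct fifo q = { &a };

	retire_upto(&q, &b);	/* retires a, then b */
	assert(q.head == &c);
	return 0;
}
```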
@@ -399,29 +456,9 @@ static void i915_request_retire(struct i915_request *request)
 
 	/* Retirement decays the ban score as it is a sign of ctx progress */
 	atomic_dec_if_positive(&request->ctx->ban_score);
+	intel_context_unpin(request->ctx, request->engine);
 
-	/*
-	 * The backing object for the context is done after switching to the
-	 * *next* context. Therefore we cannot retire the previous context until
-	 * the next context has already started running. However, since we
-	 * cannot take the required locks at i915_request_submit() we
-	 * defer the unpinning of the active context to now, retirement of
-	 * the subsequent request.
-	 */
-	if (engine->last_retired_context)
-		intel_context_unpin(engine->last_retired_context, engine);
-	engine->last_retired_context = request->ctx;
-
-	spin_lock_irq(&request->lock);
-	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
-		dma_fence_signal_locked(&request->fence);
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
-		intel_engine_cancel_signaling(request);
-	if (request->waitboost) {
-		GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters));
-		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
-	}
-	spin_unlock_irq(&request->lock);
+	__retire_engine_upto(request->engine, request);
 
 	unreserve_gt(request->i915);
 
@@ -431,18 +468,24 @@ static void i915_request_retire(struct i915_request *request)
 
 void i915_request_retire_upto(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
+	struct intel_ring *ring = rq->ring;
 	struct i915_request *tmp;
 
+	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+		  rq->engine->name,
+		  rq->fence.context, rq->fence.seqno,
+		  rq->global_seqno,
+		  intel_engine_get_seqno(rq->engine));
+
 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_request_completed(rq));
 
-	if (list_empty(&rq->link))
+	if (list_empty(&rq->ring_link))
 		return;
 
 	do {
-		tmp = list_first_entry(&engine->timeline->requests,
-				       typeof(*tmp), link);
+		tmp = list_first_entry(&ring->request_list,
+				       typeof(*tmp), ring_link);
 
 		i915_request_retire(tmp);
 	} while (tmp != rq);
@@ -651,9 +694,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (ret)
 		goto err_unreserve;
 
-	/* Move the oldest request to the slab-cache (if not in use!) */
-	rq = list_first_entry_or_null(&engine->timeline->requests,
-				      typeof(*rq), link);
+	/* Move our oldest request to the slab-cache (if not in use!) */
+	rq = list_first_entry_or_null(&ring->request_list,
+				      typeof(*rq), ring_link);
 	if (rq && i915_request_completed(rq))
 		i915_request_retire(rq);
 
@@ -771,6 +814,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (ret)
 		goto err_unwind;
 
+	/* Keep a second pin for the dual retirement along engine and ring */
+	__intel_context_pin(rq->ctx, engine);
+
 	/* Check that we didn't interrupt ourselves with a new request */
 	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
 	return rq;
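Aside, not part of the patch: with retirement split across ring and engine, the context is now unpinned twice per request, once by i915_request_retire() along the ring and once via engine->last_retired_context along the engine timeline, so allocation takes a matching second pin here. A toy sketch of the balanced accounting, with hypothetical names:

```c
#include <assert.h>

/* Each request holds two references on its context: one dropped when
 * the request retires along the ring, one dropped later along the
 * engine timeline, so the context cannot be unpinned while either
 * retirement path still needs it.
 */
struct ctx { int pin_count; };

static void ctx_pin(struct ctx *c)   { c->pin_count++; }
static void ctx_unpin(struct ctx *c) { assert(c->pin_count > 0); c->pin_count--; }

int main(void)
{
	struct ctx c = { 0 };

	ctx_pin(&c);		/* pin taken at request construction */
	ctx_pin(&c);		/* second pin for the dual retirement */

	ctx_unpin(&c);		/* ring-side retirement */
	ctx_unpin(&c);		/* engine-side retirement */
	assert(c.pin_count == 0);
	return 0;
}
```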
@@ -1357,38 +1403,30 @@ long i915_request_wait(struct i915_request *rq,
 	return timeout;
 }
 
-static void engine_retire_requests(struct intel_engine_cs *engine)
+static void ring_retire_requests(struct intel_ring *ring)
 {
 	struct i915_request *request, *next;
-	u32 seqno = intel_engine_get_seqno(engine);
-	LIST_HEAD(retire);
 
-	spin_lock_irq(&engine->timeline->lock);
 	list_for_each_entry_safe(request, next,
-				 &engine->timeline->requests, link) {
-		if (!i915_seqno_passed(seqno, request->global_seqno))
+				 &ring->request_list, ring_link) {
+		if (!i915_request_completed(request))
 			break;
 
-		list_move_tail(&request->link, &retire);
-	}
-	spin_unlock_irq(&engine->timeline->lock);
-
-	list_for_each_entry_safe(request, next, &retire, link)
 		i915_request_retire(request);
+	}
 }
 
 void i915_retire_requests(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct intel_ring *ring, *next;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
 	if (!i915->gt.active_requests)
 		return;
 
-	for_each_engine(engine, i915, id)
-		engine_retire_requests(engine);
+	list_for_each_entry_safe(ring, next, &i915->gt.rings, link)
+		ring_retire_requests(ring);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
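Aside, not part of the patch: retiring along rings lets this walk drop the per-engine seqno comparison, the timeline lock, and the temporary list entirely. Each ring's request_list is already kept in completion order, so the loop stops at the first request i915_request_completed() rejects. A compact userspace sketch of that stop-at-first-incomplete drain, with hypothetical types:

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct request { bool completed; struct request *next; };

/* Walk a ring's request list oldest-first and stop at the first request
 * that has not completed; everything behind it is younger, so nothing
 * further can be retired either.
 */
static void ring_retire(struct request **head)
{
	struct request *rq, *next;

	for (rq = *head; rq; rq = next) {
		next = rq->next;
		if (!rq->completed)
			break;
		*head = next;	/* stands in for i915_request_retire() */
	}
}

int main(void)
{
	struct request c = { false, NULL }, b = { true, &c }, a = { true, &b };
	struct request *head = &a;

	ring_retire(&head);
	assert(head == &c);	/* a and b retired, c still pending */
	return 0;
}
```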