 #define _EL_OFFSET_STATUS_BUF		0x370
 #define _EL_OFFSET_STATUS_PTR		0x3A0
 
-#define execlist_ring_mmio(gvt, ring_id, offset) \
-	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
 
 #define valid_context(ctx) ((ctx)->valid)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
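The reworked macro drops the gvt-and-ring_id indirection: the register address now falls directly out of the engine's own `mmio_base`, so callers no longer need a handle back to `dev_priv->engine[]`. A minimal standalone sketch of the new shape, where `fake_engine` is a hypothetical stand-in for `intel_engine_cs` (0x2000 is the render engine's mmio base; 0x370 is the `_EL_OFFSET_STATUS_BUF` value defined above):

```c
/* Standalone sketch; fake_engine stands in for intel_engine_cs. */
#include <stdint.h>
#include <stdio.h>

struct fake_engine {
	uint32_t mmio_base;
};

#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))

int main(void)
{
	struct fake_engine rcs0 = { .mmio_base = 0x2000 };

	/* 0x2000 + 0x370 = 0x2370: this ring's context status buffer */
	printf("%#x\n", (unsigned int)execlist_ring_mmio(&rcs0, 0x370));
	return 0;
}
```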
@@ -54,12 +53,12 @@ static int context_switch_events[] = {
 	[VECS0] = VECS_AS_CONTEXT_SWITCH,
 };
 
-static int ring_id_to_context_switch_event(unsigned int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
 {
-	if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
+	if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
 		return -EINVAL;
 
-	return context_switch_events[ring_id];
+	return context_switch_events[engine->id];
 }
 
 static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
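The same bounds-checked table lookup, reduced to a standalone sketch: `engine->id` replaces `ring_id` as the array index, so the guard moves with it. All names and event values below are stand-ins, and the kernel's `WARN_ON()` is reduced to a plain check:

```c
#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum fake_engine_id { RCS0, BCS0, VCS0, VECS0 };

struct fake_engine {
	enum fake_engine_id id;
};

/* stand-ins for the *_AS_CONTEXT_SWITCH event numbers */
static const int context_switch_events[] = {
	[RCS0]  = 1,
	[BCS0]  = 2,
	[VCS0]  = 3,
	[VECS0] = 4,
};

static int to_context_switch_event(const struct fake_engine *engine)
{
	/* the kernel wraps this range check in WARN_ON() */
	if (engine->id >= ARRAY_SIZE(context_switch_events))
		return -EINVAL;
	return context_switch_events[engine->id];
}

int main(void)
{
	struct fake_engine vecs0 = { .id = VECS0 };

	printf("event: %d\n", to_context_switch_event(&vecs0));
	return 0;
}
```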
@@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
 	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
 	struct intel_vgpu *vgpu = execlist->vgpu;
 	struct execlist_status_format status;
-	int ring_id = execlist->ring_id;
-	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
-			ring_id, _EL_OFFSET_STATUS);
+	u32 status_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
 
 	status.ldw = vgpu_vreg(vgpu, status_reg);
 	status.udw = vgpu_vreg(vgpu, status_reg + 4);
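The execlist status is a 64-bit value exposed through two 32-bit vreg slots, low dword at `status_reg` and high dword at `status_reg + 4`. A hedged sketch that models the vgpu's virtual register file as a plain array to show how the halves combine (the offset and values are hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

/* toy virtual register file, indexed by byte offset / 4 */
static uint32_t vregs[0x1000];

static uint32_t vgpu_vreg(uint32_t reg)
{
	return vregs[reg / 4];
}

int main(void)
{
	uint32_t status_reg = 0x2370;	/* hypothetical per-engine offset */
	uint64_t status;

	vregs[status_reg / 4]     = 0x11111111;	/* ldw */
	vregs[status_reg / 4 + 1] = 0x22222222;	/* udw */

	status = ((uint64_t)vgpu_vreg(status_reg + 4) << 32) |
		 vgpu_vreg(status_reg);
	printf("status: %#llx\n", (unsigned long long)status);
	return 0;
}
```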
@@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
 }
 
 static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
-		struct execlist_context_status_format *status,
-		bool trigger_interrupt_later)
+			       struct execlist_context_status_format *status,
+			       bool trigger_interrupt_later)
 {
 	struct intel_vgpu *vgpu = execlist->vgpu;
-	int ring_id = execlist->ring_id;
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 write_pointer;
 	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
 	unsigned long hwsp_gpa;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_PTR);
-	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_BUF);
+	ctx_status_ptr_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+	ctx_status_buf_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
 
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
@@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
 
 	/* Update the CSB and CSB write pointer in HWSP */
 	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-					 vgpu->hws_pga[ring_id]);
+					 vgpu->hws_pga[execlist->engine->id]);
 	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
 		intel_gvt_hypervisor_write_gpa(vgpu,
-			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
-			write_pointer * 8,
-			status, 8);
+					       hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+					       status, 8);
 		intel_gvt_hypervisor_write_gpa(vgpu,
-			hwsp_gpa +
-			intel_hws_csb_write_index(dev_priv) * 4,
-			&write_pointer, 4);
+					       hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+					       &write_pointer, 4);
 	}
 
 	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
-		vgpu->id, write_pointer, offset, status->ldw, status->udw);
+		   vgpu->id, write_pointer, offset, status->ldw, status->udw);
 
 	if (trigger_interrupt_later)
 		return;
 
 	intel_vgpu_trigger_virtual_event(vgpu,
-			ring_id_to_context_switch_event(execlist->ring_id));
+					 to_context_switch_event(execlist->engine));
 }
 
 static int emulate_execlist_ctx_schedule_out(
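The HWSP arithmetic in the hunk above writes one context-status entry of two dwords (8 bytes) at `write_pointer` slots past the start of the CSB, which itself sits at dword index `I915_HWS_CSB_BUF0_INDEX` inside the hardware status page. A sketch of just that addressing, with the index value and addresses assumed for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* dword index of CSB entry 0 in the HWSP; 0x10 assumed here */
#define I915_HWS_CSB_BUF0_INDEX 0x10

/* guest physical address of one 8-byte (two-dword) CSB entry */
static uint64_t csb_entry_gpa(uint64_t hwsp_gpa, uint32_t write_pointer)
{
	return hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8;
}

int main(void)
{
	uint64_t hwsp = 0x100000;	/* hypothetical HWSP guest address */
	uint32_t wp;

	for (wp = 0; wp < 3; wp++)
		printf("entry %u at %#llx\n", (unsigned int)wp,
		       (unsigned long long)csb_entry_gpa(hwsp, wp));
	return 0;
}
```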
@@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
 		struct intel_vgpu_execlist *execlist)
 {
 	struct intel_vgpu *vgpu = execlist->vgpu;
-	int ring_id = execlist->ring_id;
-	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS);
+	u32 status_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
 	struct execlist_status_format status;
 
 	status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct execlist_ctx_descriptor_format ctx[2];
-	int ring_id = workload->ring_id;
 	int ret;
 
 	if (!workload->emulate_schedule_in)
@@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
 	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
 
-	ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+	ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+					   ctx);
 	if (ret) {
 		gvt_vgpu_err("fail to emulate execlist schedule in\n");
 		return ret;
@@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist =
+		&s->execlist[workload->engine->id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
 	bool lite_restore = false;
 	int ret = 0;
 
-	gvt_dbg_el("complete workload %p status %d\n", workload,
-			workload->status);
+	gvt_dbg_el("complete workload %p status %d\n",
+		   workload, workload->status);
 
-	if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
+	if (workload->status || vgpu->resetting_eng & workload->engine->mask)
 		goto out;
 
-	if (!list_empty(workload_q_head(vgpu, ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, workload->engine))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
@@ -436,14 +429,15 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	return ret;
 }
 
-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
-		struct execlist_ctx_descriptor_format *desc,
-		bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+			  const struct intel_engine_cs *engine,
+			  struct execlist_ctx_descriptor_format *desc,
+			  bool emulate_schedule_in)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_vgpu_workload *workload = NULL;
 
-	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+	workload = intel_vgpu_create_workload(vgpu, engine, desc);
 	if (IS_ERR(workload))
 		return PTR_ERR(workload);
@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	workload->emulate_schedule_in = emulate_schedule_in;
 
 	if (emulate_schedule_in)
-		workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+		workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
 
 	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
-		emulate_schedule_in);
+		   emulate_schedule_in);
 
 	intel_vgpu_queue_workload(workload);
 	return 0;
 }
 
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
 	struct execlist_ctx_descriptor_format *desc[2];
 	int i, ret;
@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 	for (i = 0; i < ARRAY_SIZE(desc); i++) {
 		if (!desc[i]->valid)
 			continue;
-		ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+		ret = submit_context(vgpu, engine, desc[i], i == 0);
 		if (ret) {
 			gvt_vgpu_err("failed to submit desc %d\n", i);
 			return ret;
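An ELSP write carries up to two context descriptors, and the `i == 0` argument above means only the first slot gets schedule-in emulation. A standalone sketch of that submission shape, with hypothetical stand-in types:

```c
#include <stdbool.h>
#include <stdio.h>

struct fake_desc {			/* stand-in descriptor */
	bool valid;
	unsigned int context_id;
};

static int submit_one(const struct fake_desc *desc, bool emulate_schedule_in)
{
	printf("submit ctx %u, emulate schedule-in: %d\n",
	       desc->context_id, emulate_schedule_in);
	return 0;
}

int main(void)
{
	struct fake_desc desc[2] = {
		{ .valid = true,  .context_id = 1 },
		{ .valid = false, .context_id = 2 },	/* empty second slot */
	};
	int i, ret;

	for (i = 0; i < 2; i++) {
		if (!desc[i].valid)
			continue;
		/* only the first valid slot emulates schedule-in */
		ret = submit_one(&desc[i], i == 0);
		if (ret)
			return ret;
	}
	return 0;
}
```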
@@ -504,22 +499,22 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 	return -EINVAL;
 }
 
-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 ctx_status_ptr_reg;
 
 	memset(execlist, 0, sizeof(*execlist));
 
 	execlist->vgpu = vgpu;
-	execlist->ring_id = ring_id;
+	execlist->engine = engine;
 	execlist->slot[0].index = 0;
 	execlist->slot[1].index = 1;
 
-	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_PTR);
+	ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
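The `write_ptr = 0x7` reset value is an out-of-range sentinel marking an empty status buffer, so the first real update lands in slot 0 and later updates wrap within the buffer. A small sketch of that pointer behavior; the CSB depth of 6 and the wrap rule are assumed for illustration:

```c
#include <stdio.h>

#define CSB_ENTRIES	6	/* assumed gen8-style CSB depth */
#define CSB_EMPTY	0x7	/* sentinel: nothing written yet */

static unsigned int next_csb_slot(unsigned int write_ptr)
{
	if (write_ptr == CSB_EMPTY)	/* first update after reset */
		return 0;
	return (write_ptr + 1) % CSB_ENTRIES;
}

int main(void)
{
	unsigned int wp = CSB_EMPTY;
	int i;

	for (i = 0; i < 8; i++) {
		wp = next_csb_slot(wp);
		printf("next write slot: %u\n", wp);
	}
	return 0;
}
```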
@@ -549,7 +544,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 	intel_engine_mask_t tmp;
 
 	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
-		init_vgpu_execlist(vgpu, engine->id);
+		init_vgpu_execlist(vgpu, engine);
 }
 
 static int init_execlist(struct intel_vgpu *vgpu,
0 commit comments