@@ -490,24 +490,52 @@ static void fill_engine_enable_masks(struct xe_gt *gt,
 			   engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
 }
 
-static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
+/*
+ * Write the offsets corresponding to the golden LRCs. The actual data is
+ * populated later by guc_golden_lrc_populate()
+ */
+static void guc_golden_lrc_init(struct xe_guc_ads *ads)
 {
 	struct xe_device *xe = ads_to_xe(ads);
+	struct xe_gt *gt = ads_to_gt(ads);
 	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
 			offsetof(struct __guc_ads_blob, system_info));
-	u8 guc_class;
+	size_t alloc_size, real_size;
+	u32 addr_ggtt, offset;
+	int class;
+
+	offset = guc_ads_golden_lrc_offset(ads);
+	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+		u8 guc_class;
+
+		guc_class = xe_engine_class_to_guc_class(class);
 
-	for (guc_class = 0; guc_class <= GUC_MAX_ENGINE_CLASSES; ++guc_class) {
 		if (!info_map_read(xe, &info_map,
 				   engine_enabled_masks[guc_class]))
 			continue;
 
+		real_size = xe_gt_lrc_size(gt, class);
+		alloc_size = PAGE_ALIGN(real_size);
+
+		/*
+		 * This interface is slightly confusing. We need to pass the
+		 * base address of the full golden context and the size of just
+		 * the engine state, which is the section of the context image
+		 * that starts after the execlists LRC registers. This is
+		 * required to allow the GuC to restore just the engine state
+		 * when a watchdog reset occurs.
+		 * We calculate the engine state size by removing the size of
+		 * what comes before it in the context image (which is identical
+		 * on all engines).
+		 */
 		ads_blob_write(ads, ads.eng_state_size[guc_class],
-			       guc_ads_golden_lrc_size(ads) -
-			       xe_lrc_skip_size(xe));
+			       real_size - xe_lrc_skip_size(xe));
 		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
-			       xe_bo_ggtt_addr(ads->bo) +
-			       guc_ads_golden_lrc_offset(ads));
+			       addr_ggtt);
+
+		addr_ggtt += alloc_size;
 	}
 }
 
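An illustrative note on the new layout arithmetic in guc_golden_lrc_init() above: each enabled engine class gets its own page-aligned slot for its golden LRC, starting at the golden-LRC offset inside the ADS BO, and the GGTT address written to golden_context_lrca[] simply advances by the aligned slot size. The standalone C sketch below reproduces only that arithmetic; the page size, class count, per-class sizes and base address are hypothetical, and sketch_page_align() merely stands in for the kernel's PAGE_ALIGN().

/*
 * Illustrative sketch only, not part of the patch: it reproduces the
 * address layout done by guc_golden_lrc_init(). Page size, class count,
 * per-class context sizes and the base GGTT address are hypothetical,
 * and sketch_page_align() stands in for the kernel's PAGE_ALIGN().
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE   4096u
#define SKETCH_NUM_CLASSES 5

static uint32_t sketch_page_align(uint32_t size)
{
	return (size + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1);
}

int main(void)
{
	/* Hypothetical per-class golden context sizes (xe_gt_lrc_size()) */
	const uint32_t real_size[SKETCH_NUM_CLASSES] = {
		94208, 94208, 36864, 36864, 36864
	};
	/* Hypothetical xe_bo_ggtt_addr(ads->bo) + guc_ads_golden_lrc_offset(ads) */
	uint32_t addr_ggtt = 0x00200000;
	int class;

	for (class = 0; class < SKETCH_NUM_CLASSES; class++) {
		uint32_t alloc_size = sketch_page_align(real_size[class]);

		/* Address that would land in golden_context_lrca[guc_class] */
		printf("class %d: lrca 0x%08" PRIx32 " (slot %" PRIu32 " bytes)\n",
		       class, addr_ggtt, alloc_size);

		/* The next class starts after this class's page-aligned slot */
		addr_ggtt += alloc_size;
	}

	return 0;
}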
@@ -857,7 +885,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
 
 	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
 	guc_policies_init(ads);
-	guc_prep_golden_lrc_null(ads);
+	guc_golden_lrc_init(ads);
 	guc_mapping_table_init_invalid(gt, &info_map);
 	guc_doorbell_init(ads);
 
@@ -883,7 +911,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
 	guc_policies_init(ads);
 	fill_engine_enable_masks(gt, &info_map);
 	guc_mmio_reg_state_init(ads);
-	guc_prep_golden_lrc_null(ads);
+	guc_golden_lrc_init(ads);
 	guc_mapping_table_init(gt, &info_map);
 	guc_capture_prep_lists(ads);
 	guc_doorbell_init(ads);
@@ -903,18 +931,22 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
 		       guc_ads_private_data_offset(ads));
 }
 
-static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
+/*
+ * After the golden LRC's are recorded for each engine class by the first
+ * submission, copy them to the ADS, as initialized earlier by
+ * guc_golden_lrc_init().
+ */
+static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
 {
 	struct xe_device *xe = ads_to_xe(ads);
 	struct xe_gt *gt = ads_to_gt(ads);
 	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
 			offsetof(struct __guc_ads_blob, system_info));
 	size_t total_size = 0, alloc_size, real_size;
-	u32 addr_ggtt, offset;
+	u32 offset;
 	int class;
 
 	offset = guc_ads_golden_lrc_offset(ads);
-	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
 
 	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
 		u8 guc_class;
@@ -931,26 +963,9 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
 		alloc_size = PAGE_ALIGN(real_size);
 		total_size += alloc_size;
 
-		/*
-		 * This interface is slightly confusing. We need to pass the
-		 * base address of the full golden context and the size of just
-		 * the engine state, which is the section of the context image
-		 * that starts after the execlists LRC registers. This is
-		 * required to allow the GuC to restore just the engine state
-		 * when a watchdog reset occurs.
-		 * We calculate the engine state size by removing the size of
-		 * what comes before it in the context image (which is identical
-		 * on all engines).
-		 */
-		ads_blob_write(ads, ads.eng_state_size[guc_class],
-			       real_size - xe_lrc_skip_size(xe));
-		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
-			       addr_ggtt);
-
 		xe_map_memcpy_to(xe, ads_to_map(ads), offset,
 				 gt->default_lrc[class], real_size);
 
-		addr_ggtt += alloc_size;
 		offset += alloc_size;
 	}
 
@@ -959,7 +974,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
 
 void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
 {
-	guc_populate_golden_lrc(ads);
+	guc_golden_lrc_populate(ads);
 }
 
 static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
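For readers skimming the diff, the net effect is a two-phase flow: the renamed guc_golden_lrc_init() writes the per-class GGTT addresses and engine-state sizes while the ADS is populated (before the GuC is loaded), and guc_golden_lrc_populate() only copies the recorded default LRC images into those pre-assigned slots from xe_guc_ads_populate_post_load(). The tiny C sketch below is an assumed illustration of that ordering; the *_sketch functions are hypothetical stand-ins, not driver code.

#include <stdio.h>

/* Stand-in for guc_golden_lrc_init(): addresses and sizes only. */
static void golden_lrc_init_sketch(void)
{
	printf("ADS populate (pre GuC load): golden LRC addresses + sizes written\n");
}

/* Stand-in for guc_golden_lrc_populate(): data copy into the same slots. */
static void golden_lrc_populate_sketch(void)
{
	printf("ADS post-load: default LRC images copied to the ADS\n");
}

int main(void)
{
	golden_lrc_init_sketch();	/* called from xe_guc_ads_populate{,_minimal}() */
	golden_lrc_populate_sketch();	/* called from xe_guc_ads_populate_post_load() */
	return 0;
}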