@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 }
 
 static void increment_queue_count(struct device_queue_manager *dqm,
-                        enum kfd_queue_type type)
+                        struct qcm_process_device *qpd,
+                        struct queue *q)
 {
         dqm->active_queue_count++;
-        if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+            q->properties.type == KFD_QUEUE_TYPE_DIQ)
                 dqm->active_cp_queue_count++;
+
+        if (q->properties.is_gws) {
+                dqm->gws_queue_count++;
+                qpd->mapped_gws_queue = true;
+        }
 }
 
 static void decrement_queue_count(struct device_queue_manager *dqm,
-                        enum kfd_queue_type type)
+                        struct qcm_process_device *qpd,
+                        struct queue *q)
 {
         dqm->active_queue_count--;
-        if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+            q->properties.type == KFD_QUEUE_TYPE_DIQ)
                 dqm->active_cp_queue_count--;
+
+        if (q->properties.is_gws) {
+                dqm->gws_queue_count--;
+                qpd->mapped_gws_queue = false;
+        }
 }
 
 /*
@@ -412,7 +426,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
         list_add(&q->list, &qpd->queues_list);
         qpd->queue_count++;
         if (q->properties.is_active)
-                increment_queue_count(dqm, q->properties.type);
+                increment_queue_count(dqm, qpd, q);
 
         /*
          * Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
                 deallocate_vmid(dqm, qpd, q);
         }
         qpd->queue_count--;
-        if (q->properties.is_active) {
-                decrement_queue_count(dqm, q->properties.type);
-                if (q->properties.is_gws) {
-                        dqm->gws_queue_count--;
-                        qpd->mapped_gws_queue = false;
-                }
-        }
+        if (q->properties.is_active)
+                decrement_queue_count(dqm, qpd, q);
 
         return retval;
 }
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
          * dqm->active_queue_count to determine whether a new runlist must be
          * uploaded.
          */
-        if (q->properties.is_active && !prev_active)
-                increment_queue_count(dqm, q->properties.type);
-        else if (!q->properties.is_active && prev_active)
-                decrement_queue_count(dqm, q->properties.type);
-
-        if (q->gws && !q->properties.is_gws) {
+        if (q->properties.is_active && !prev_active) {
+                increment_queue_count(dqm, &pdd->qpd, q);
+        } else if (!q->properties.is_active && prev_active) {
+                decrement_queue_count(dqm, &pdd->qpd, q);
+        } else if (q->gws && !q->properties.is_gws) {
                 if (q->properties.is_active) {
                         dqm->gws_queue_count++;
                         pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                                 q->properties.type)];
                 q->properties.is_active = false;
-                decrement_queue_count(dqm, q->properties.type);
-                if (q->properties.is_gws) {
-                        dqm->gws_queue_count--;
-                        qpd->mapped_gws_queue = false;
-                }
+                decrement_queue_count(dqm, qpd, q);
 
                 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
                         continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
                         continue;
 
                 q->properties.is_active = false;
-                decrement_queue_count(dqm, q->properties.type);
+                decrement_queue_count(dqm, qpd, q);
         }
         pdd->last_evict_timestamp = get_jiffies_64();
         retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                                 q->properties.type)];
                 q->properties.is_active = true;
-                increment_queue_count(dqm, q->properties.type);
-                if (q->properties.is_gws) {
-                        dqm->gws_queue_count++;
-                        qpd->mapped_gws_queue = true;
-                }
+                increment_queue_count(dqm, qpd, q);
 
                 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
                         continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
                         continue;
 
                 q->properties.is_active = true;
-                increment_queue_count(dqm, q->properties.type);
+                increment_queue_count(dqm, &pdd->qpd, q);
         }
         retval = execute_queues_cpsch(dqm,
                                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
                         dqm->total_queue_count);
 
         list_add(&kq->list, &qpd->priv_queue_list);
-        increment_queue_count(dqm, kq->queue->properties.type);
+        increment_queue_count(dqm, qpd, kq->queue);
         qpd->is_debug = true;
         execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
         dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
         dqm_lock(dqm);
         list_del(&kq->list);
-        decrement_queue_count(dqm, kq->queue->properties.type);
+        decrement_queue_count(dqm, qpd, kq->queue);
         qpd->is_debug = false;
         execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
         /*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
         qpd->queue_count++;
 
         if (q->properties.is_active) {
-                increment_queue_count(dqm, q->properties.type);
+                increment_queue_count(dqm, qpd, q);
 
                 execute_queues_cpsch(dqm,
                                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
         list_del(&q->list);
         qpd->queue_count--;
         if (q->properties.is_active) {
-                decrement_queue_count(dqm, q->properties.type);
+                decrement_queue_count(dqm, qpd, q);
                 retval = execute_queues_cpsch(dqm,
                                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
                 if (retval == -ETIME)
                         qpd->reset_wavefronts = true;
-                if (q->properties.is_gws) {
-                        dqm->gws_queue_count--;
-                        qpd->mapped_gws_queue = false;
-                }
         }
 
         /*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
         /* Clean all kernel queues */
         list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
                 list_del(&kq->list);
-                decrement_queue_count(dqm, kq->queue->properties.type);
+                decrement_queue_count(dqm, qpd, kq->queue);
                 qpd->is_debug = false;
                 dqm->total_queue_count--;
                 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
                 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
                         deallocate_sdma_queue(dqm, q);
 
-                if (q->properties.is_active) {
-                        decrement_queue_count(dqm, q->properties.type);
-                        if (q->properties.is_gws) {
-                                dqm->gws_queue_count--;
-                                qpd->mapped_gws_queue = false;
-                        }
-                }
+                if (q->properties.is_active)
+                        decrement_queue_count(dqm, qpd, q);
 
                 dqm->total_queue_count--;
         }
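
Taken together, the hunks above route every queue activation change through the two consolidated helpers, so the gws_queue_count / mapped_gws_queue bookkeeping can no longer drift out of step with the active-queue accounting on the eviction, restore, destroy, and termination paths. Below is a minimal standalone C sketch of the same pattern; the struct layouts and enum values are simplified stand-ins for the kernel's struct device_queue_manager, struct qcm_process_device, and struct queue, not the real definitions:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by the patch. */
enum kfd_queue_type { KFD_QUEUE_TYPE_COMPUTE, KFD_QUEUE_TYPE_SDMA, KFD_QUEUE_TYPE_DIQ };

struct queue {
        struct {
                enum kfd_queue_type type;
                bool is_active;
                bool is_gws;    /* queue owns a Global Wave Sync allocation */
        } properties;
};

struct qcm_process_device {
        bool mapped_gws_queue;
};

struct device_queue_manager {
        int active_queue_count;
        int active_cp_queue_count;
        int gws_queue_count;
};

/*
 * Every paired counter update lives in one helper, mirroring the patched
 * increment_queue_count()/decrement_queue_count(): a caller can no longer
 * adjust the active count while forgetting the GWS half.
 */
static void increment_queue_count(struct device_queue_manager *dqm,
                                  struct qcm_process_device *qpd,
                                  struct queue *q)
{
        dqm->active_queue_count++;
        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
            q->properties.type == KFD_QUEUE_TYPE_DIQ)
                dqm->active_cp_queue_count++;

        if (q->properties.is_gws) {
                dqm->gws_queue_count++;
                qpd->mapped_gws_queue = true;
        }
}

static void decrement_queue_count(struct device_queue_manager *dqm,
                                  struct qcm_process_device *qpd,
                                  struct queue *q)
{
        dqm->active_queue_count--;
        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
            q->properties.type == KFD_QUEUE_TYPE_DIQ)
                dqm->active_cp_queue_count--;

        if (q->properties.is_gws) {
                dqm->gws_queue_count--;
                qpd->mapped_gws_queue = false;
        }
}

int main(void)
{
        struct device_queue_manager dqm = {0};
        struct qcm_process_device qpd = {0};
        struct queue q = { .properties = {
                .type = KFD_QUEUE_TYPE_COMPUTE,
                .is_active = true,
                .is_gws = true,
        } };

        /* Activate (create/restore), then deactivate (evict/destroy). */
        increment_queue_count(&dqm, &qpd, &q);
        assert(dqm.gws_queue_count == 1 && qpd.mapped_gws_queue);

        decrement_queue_count(&dqm, &qpd, &q);
        assert(dqm.gws_queue_count == 0 && !qpd.mapped_gws_queue);
        assert(dqm.active_queue_count == 0 && dqm.active_cp_queue_count == 0);

        printf("queue counters balanced\n");
        return 0;
}

The design point: a paired invariant (incrementing the active count implies incrementing the GWS count for a GWS queue) is enforced by construction in one place, rather than by each of the half-dozen call sites remembering to update both halves.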