@@ -797,6 +797,30 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
 	return 0;
 }
 
+
+/* Complete the cancelled URBs we unlinked from td_list. */
+static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
+{
+	struct xhci_ring *ring;
+	struct xhci_td *td, *tmp_td;
+
+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+				 cancelled_td_list) {
+
+		/*
+		 * Doesn't matter what we pass for status, since the core will
+		 * just overwrite it (because the URB has been unlinked).
+		 */
+		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+
+		if (td->cancel_status == TD_CLEARED)
+			xhci_td_cleanup(ep->xhci, td, ring, 0);
+
+		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
+			return;
+	}
+}
+
 static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
 				unsigned int ep_index, enum xhci_ep_reset_type reset_type)
 {
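The new helper above uses the *_safe list iterator because entries can be unlinked from cancelled_td_list while the list is being walked (giving the URB back drops the TD). A minimal standalone model of that pattern, not part of the patch and with purely illustrative names (fake_td, cleared, giveback_cleared):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct fake_td {
	bool cleared;			/* models td->cancel_status == TD_CLEARED */
	struct fake_td *next;
};

/* Save ->next before possibly freeing the current node, like list_for_each_entry_safe(). */
static struct fake_td *giveback_cleared(struct fake_td *head)
{
	struct fake_td **link = &head;
	struct fake_td *td = head;
	struct fake_td *tmp;

	while (td) {
		tmp = td->next;
		if (td->cleared) {
			*link = tmp;	/* unlink, then "complete" the URB */
			printf("gave back td %p\n", (void *)td);
			free(td);
		} else {
			link = &td->next;
		}
		td = tmp;
	}
	return head;
}

int main(void)
{
	struct fake_td *head = NULL, *td;
	int i;

	for (i = 0; i < 3; i++) {
		td = calloc(1, sizeof(*td));
		td->cleared = (i != 1);	/* leave one TD pending, as a not-yet-cleared TD would be */
		td->next = head;
		head = td;
	}
	head = giveback_cleared(head);
	printf("still pending: %s\n", head ? "yes" : "no");
	while (head) {
		td = head->next;
		free(head);
		head = td;
	}
	return 0;
}

This mirrors the driver's split: only fully cleared entries are given back; anything still pending stays on the list for a later pass.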
@@ -834,15 +858,19 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
 
 	ep->ep_state |= EP_HALTED;
 
+	/* add td to cancelled list and let reset ep handler take care of it */
+	if (reset_type == EP_HARD_RESET) {
+		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
+		if (td && list_empty(&td->cancelled_td_list)) {
+			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+			td->cancel_status = TD_HALTED;
+		}
+	}
+
 	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
 	if (err)
 		return;
 
-	if (reset_type == EP_HARD_RESET) {
-		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
-		xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id,
-					  td);
-	}
 	xhci_ring_cmd_db(xhci);
 }
 
@@ -851,16 +879,20 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
  * We have the xHCI lock, so nothing can modify this list until we drop it.
  * We're also in the event handler, so we can't get re-interrupted if another
  * Stop Endpoint command completes.
+ *
+ * only call this when ring is not in a running state
  */
 
-static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep,
-					 struct xhci_dequeue_state *deq_state)
+static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
 {
 	struct xhci_hcd *xhci;
 	struct xhci_td *td = NULL;
 	struct xhci_td *tmp_td = NULL;
+	struct xhci_td *cached_td = NULL;
 	struct xhci_ring *ring;
+	struct xhci_dequeue_state deq_state;
 	u64 hw_deq;
+	unsigned int slot_id = ep->vdev->slot_id;
 
 	xhci = ep->xhci;
 
@@ -886,14 +918,28 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep,
 
 		if (trb_in_td(xhci, td->start_seg, td->first_trb,
 			      td->last_trb, hw_deq, false)) {
-			xhci_find_new_dequeue_state(xhci, ep->vdev->slot_id,
-						    ep->ep_index,
-						    td->urb->stream_id,
-						    td, deq_state);
+			switch (td->cancel_status) {
+			case TD_CLEARED: /* TD is already no-op */
+			case TD_CLEARING_CACHE: /* set TR deq command already queued */
+				break;
+			case TD_DIRTY: /* TD is cached, clear it */
+			case TD_HALTED:
+				/* FIXME stream case, several stopped rings */
+				cached_td = td;
+				break;
+			}
 		} else {
 			td_to_noop(xhci, ring, td, false);
+			td->cancel_status = TD_CLEARED;
 		}
-
+	}
+	if (cached_td) {
+		cached_td->cancel_status = TD_CLEARING_CACHE;
+		xhci_find_new_dequeue_state(xhci, slot_id, ep->ep_index,
+					    cached_td->urb->stream_id,
+					    cached_td, &deq_state);
+		xhci_queue_new_dequeue_state(xhci, slot_id, ep->ep_index,
+					     &deq_state);
 	}
 	return 0;
 }
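The switch above keys off the new per-TD cancel_status field, whose definition is not part of this hunk. As a reading aid, a sketch of the four states it distinguishes, assuming they are added as an enum in xhci.h alongside this change (the exact definition there may differ):

/* Sketch only, not from this diff: the cancellation states used above. */
enum xhci_cancelled_td_status {
	TD_DIRTY = 0,		/* TD queued for cancel, may still be cached by the HC */
	TD_HALTED,		/* TD halted the endpoint and was queued for cancel */
	TD_CLEARING_CACHE,	/* Set TR Deq queued to flush the TD from the HC cache */
	TD_CLEARED,		/* TD turned to no-op / flushed; safe to give back */
};

xhci_giveback_invalidated_tds(), added earlier in this patch, only gives back TDs that have reached TD_CLEARED; TDs left in TD_CLEARING_CACHE are given back from the Set TR Dequeue completion handler further down.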
@@ -912,81 +958,32 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 			    union xhci_trb *trb)
 {
 	unsigned int ep_index;
-	struct xhci_ring *ep_ring;
 	struct xhci_virt_ep *ep;
-	struct xhci_td *cur_td = NULL;
-	struct xhci_td *last_unlinked_td;
 	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_virt_device *vdev;
-	struct xhci_dequeue_state deq_state;
 
 	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
 		if (!xhci->devs[slot_id])
-			xhci_warn(xhci, "Stop endpoint command "
-				"completion for disabled slot %u\n",
-				slot_id);
+			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
+				  slot_id);
 		return;
 	}
 
-	memset(&deq_state, 0, sizeof(deq_state));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
-
 	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
 	if (!ep)
 		return;
 
-	vdev = ep->vdev;
-	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
-	trace_xhci_handle_cmd_stop_ep(ep_ctx);
-
-	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
-			struct xhci_td, cancelled_td_list);
-
-	if (list_empty(&ep->cancelled_td_list)) {
-		xhci_stop_watchdog_timer_in_irq(xhci, ep);
-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-		return;
-	}
+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
 
-	xhci_invalidate_cancelled_tds(ep, &deq_state);
+	trace_xhci_handle_cmd_stop_ep(ep_ctx);
 
+	/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
+	xhci_invalidate_cancelled_tds(ep);
 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
 
-	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
-	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
-				&deq_state);
-		xhci_ring_cmd_db(xhci);
-	} else {
-		/* Otherwise ring the doorbell(s) to restart queued transfers */
-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-	}
-
-	/*
-	 * Drop the lock and complete the URBs in the cancelled TD list.
-	 * New TDs to be cancelled might be added to the end of the list before
-	 * we can complete all the URBs for the TDs we already unlinked.
-	 * So stop when we've completed the URB for the last TD we unlinked.
-	 */
-	do {
-		cur_td = list_first_entry(&ep->cancelled_td_list,
-				struct xhci_td, cancelled_td_list);
-		list_del_init(&cur_td->cancelled_td_list);
-
-		/* Doesn't matter what we pass for status, since the core will
-		 * just overwrite it (because the URB has been unlinked).
-		 */
-		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
-		xhci_td_cleanup(xhci, cur_td, ep_ring, 0);
-
-		/* Stop processing the cancelled list if the watchdog timer is
-		 * running.
-		 */
-		if (xhci->xhc_state & XHCI_STATE_DYING)
-			return;
-	} while (cur_td != last_unlinked_td);
-
-	/* Return to the event handler with xhci->lock re-acquired */
+	/* Otherwise ring the doorbell(s) to restart queued transfers */
+	xhci_giveback_invalidated_tds(ep);
+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
@@ -1202,6 +1199,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 	struct xhci_virt_ep *ep;
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_td *td, *tmp_td;
 
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1279,7 +1277,15 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 				  ep->queued_deq_seg, ep->queued_deq_ptr);
 		}
 	}
-
+	/* HW cached TDs cleared from cache, give them back */
+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+				 cancelled_td_list) {
+		ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+		if (td->cancel_status == TD_CLEARING_CACHE) {
+			td->cancel_status = TD_CLEARED;
+			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+		}
+	}
 cleanup:
 	ep->ep_state &= ~SET_DEQ_PENDING;
 	ep->queued_deq_seg = NULL;
@@ -1309,27 +1315,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 		"Ignoring reset ep completion code of %u", cmd_comp_code);
 
-	/* HW with the reset endpoint quirk needs to have a configure endpoint
-	 * command complete before the endpoint can be used. Queue that here
-	 * because the HW can't handle two commands being queued in a row.
-	 */
-	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
-		struct xhci_command *command;
+	/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
+	xhci_invalidate_cancelled_tds(ep);
 
-		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
-		if (!command)
-			return;
+	if (xhci->quirks & XHCI_RESET_EP_QUIRK)
+		xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
+	/* Clear our internal halted state */
+	ep->ep_state &= ~EP_HALTED;
 
-		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-				"Queueing configure endpoint command");
-		xhci_queue_configure_endpoint(xhci, command,
-				xhci->devs[slot_id]->in_ctx->dma, slot_id,
-				false);
-		xhci_ring_cmd_db(xhci);
-	} else {
-		/* Clear our internal halted state */
-		ep->ep_state &= ~EP_HALTED;
-	}
+	xhci_giveback_invalidated_tds(ep);
 
 	/* if this was a soft reset, then restart */
 	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
@@ -2070,7 +2064,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			xhci_clear_hub_tt_buffer(xhci, td, ep);
 
 		xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
-						EP_HARD_RESET);
+					    EP_HARD_RESET);
+
+		return 0; /* xhci_handle_halted_endpoint marked td cancelled */
 	} else {
 		/* Update ring dequeue pointer */
 		ep_ring->dequeue = td->last_trb;