
Commit 674f843

matnyman authored and gregkh committed
xhci: split handling halted endpoints into two steps
Don't queue both a reset endpoint command and a set TR deq command at once when handling a halted endpoint. Split this into two steps: initially only queue a reset endpoint command, and then, if needed, queue a set TR deq command in the reset endpoint handler.

Note: This removes the RESET_EP_QUIRK handling which was added in commit ac9d8fe ("USB: xhci: Add quirk for Fresco Logic xHCI hardware.").

This quirk was added in 2009 for prototype xHCI hardware meant for evaluation purposes only, and it should not reach consumers. This hardware could not handle two commands queued at once, and it had bad data in the output context after a reset endpoint command.

After this patch two commands are no longer queued at once, so that part is solved by this rewrite, but the workaround for bad data in the output context, which was solved by issuing an extra configure endpoint command, is bluntly removed. Adding that workaround to the new rewrite would just add complexity, and I think it's time to let this quirk go. Print a debug message instead.

Signed-off-by: Mathias Nyman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 7c6c334 commit 674f843

File tree

3 files changed: +101, -179 lines

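Before diving into the per-file diffs, the ordering this commit establishes can be condensed into a minimal standalone C model. This is only a sketch: the types, function names (on_halt() and friends), and printouts are invented for illustration; the cancel_status state names and the one-command-at-a-time ordering are taken from the diffs below.

/*
 * Standalone model of the new two-step handling of a halted endpoint.
 * Not kernel code; the real logic lives in drivers/usb/host/xhci-ring.c.
 */
#include <stdbool.h>
#include <stdio.h>

enum td_cancel_status { TD_DIRTY, TD_HALTED, TD_CLEARING_CACHE, TD_CLEARED };

/* Step 1: endpoint halts. Only a Reset Endpoint command is queued; the
 * offending TD just goes on the cancelled list. */
static void on_halt(enum td_cancel_status *td)
{
	*td = TD_HALTED;
	puts("queue: Reset Endpoint");	/* no Set TR Deq yet */
}

/* Step 2: Reset Endpoint completes. Cancelled TDs are invalidated; a
 * Set TR Deq command is queued only if the HW had cached one of them. */
static void on_reset_ep_complete(enum td_cancel_status *td, bool hw_cached)
{
	if (hw_cached) {
		*td = TD_CLEARING_CACHE;
		puts("queue: Set TR Dequeue Pointer");
	} else {
		*td = TD_CLEARED;	/* TD was turned into a no-op */
		puts("give back URB");
	}
}

/* Step 3: Set TR Deq completes; the cached TD can now be given back. */
static void on_set_deq_complete(enum td_cancel_status *td)
{
	if (*td == TD_CLEARING_CACHE) {
		*td = TD_CLEARED;
		puts("give back URB");
	}
}

int main(void)
{
	enum td_cancel_status td = TD_DIRTY;

	on_halt(&td);			/* never two commands queued at once */
	on_reset_ep_complete(&td, true);
	on_set_deq_complete(&td);
	return 0;
}

The key property of the split is visible in main(): the Set TR Dequeue Pointer command, if needed at all, is issued only from the Reset Endpoint completion handler, never queued alongside the reset itself.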

drivers/usb/host/xhci-ring.c

Lines changed: 87 additions & 91 deletions
@@ -797,6 +797,30 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
 	return 0;
 }
 
+
+/* Complete the cancelled URBs we unlinked from td_list. */
+static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
+{
+	struct xhci_ring *ring;
+	struct xhci_td *td, *tmp_td;
+
+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+				 cancelled_td_list) {
+
+		/*
+		 * Doesn't matter what we pass for status, since the core will
+		 * just overwrite it (because the URB has been unlinked).
+		 */
+		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+
+		if (td->cancel_status == TD_CLEARED)
+			xhci_td_cleanup(ep->xhci, td, ring, 0);
+
+		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
+			return;
+	}
+}
+
 static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
 				unsigned int ep_index, enum xhci_ep_reset_type reset_type)
 {
@@ -834,15 +858,19 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
 
 	ep->ep_state |= EP_HALTED;
 
+	/* add td to cancelled list and let reset ep handler take care of it */
+	if (reset_type == EP_HARD_RESET) {
+		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
+		if (td && list_empty(&td->cancelled_td_list)) {
+			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+			td->cancel_status = TD_HALTED;
+		}
+	}
+
 	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
 	if (err)
 		return;
 
-	if (reset_type == EP_HARD_RESET) {
-		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
-		xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id,
-					  td);
-	}
 	xhci_ring_cmd_db(xhci);
 }
 
@@ -851,16 +879,20 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
  * We have the xHCI lock, so nothing can modify this list until we drop it.
  * We're also in the event handler, so we can't get re-interrupted if another
  * Stop Endpoint command completes.
+ *
+ * only call this when ring is not in a running state
  */
 
-static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep,
-					 struct xhci_dequeue_state *deq_state)
+static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
 {
 	struct xhci_hcd *xhci;
 	struct xhci_td *td = NULL;
 	struct xhci_td *tmp_td = NULL;
+	struct xhci_td *cached_td = NULL;
 	struct xhci_ring *ring;
+	struct xhci_dequeue_state deq_state;
 	u64 hw_deq;
+	unsigned int slot_id = ep->vdev->slot_id;
 
 	xhci = ep->xhci;
 
@@ -886,14 +918,28 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep,
 
 		if (trb_in_td(xhci, td->start_seg, td->first_trb,
 			      td->last_trb, hw_deq, false)) {
-			xhci_find_new_dequeue_state(xhci, ep->vdev->slot_id,
-						    ep->ep_index,
-						    td->urb->stream_id,
-						    td, deq_state);
+			switch (td->cancel_status) {
+			case TD_CLEARED: /* TD is already no-op */
+			case TD_CLEARING_CACHE: /* set TR deq command already queued */
+				break;
+			case TD_DIRTY: /* TD is cached, clear it */
+			case TD_HALTED:
+				/* FIXME stream case, several stopped rings */
+				cached_td = td;
+				break;
+			}
 		} else {
 			td_to_noop(xhci, ring, td, false);
+			td->cancel_status = TD_CLEARED;
 		}
-
+	}
+	if (cached_td) {
+		cached_td->cancel_status = TD_CLEARING_CACHE;
+		xhci_find_new_dequeue_state(xhci, slot_id, ep->ep_index,
+					    cached_td->urb->stream_id,
+					    cached_td, &deq_state);
+		xhci_queue_new_dequeue_state(xhci, slot_id, ep->ep_index,
+					     &deq_state);
 	}
 	return 0;
 }
@@ -912,81 +958,32 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 				    union xhci_trb *trb)
 {
 	unsigned int ep_index;
-	struct xhci_ring *ep_ring;
 	struct xhci_virt_ep *ep;
-	struct xhci_td *cur_td = NULL;
-	struct xhci_td *last_unlinked_td;
 	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_virt_device *vdev;
-	struct xhci_dequeue_state deq_state;
 
 	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
 		if (!xhci->devs[slot_id])
-			xhci_warn(xhci, "Stop endpoint command "
-				"completion for disabled slot %u\n",
-				slot_id);
+			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
+				  slot_id);
 		return;
 	}
 
-	memset(&deq_state, 0, sizeof(deq_state));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
-
 	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
 	if (!ep)
 		return;
 
-	vdev = ep->vdev;
-	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
-	trace_xhci_handle_cmd_stop_ep(ep_ctx);
-
-	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
-			struct xhci_td, cancelled_td_list);
-
-	if (list_empty(&ep->cancelled_td_list)) {
-		xhci_stop_watchdog_timer_in_irq(xhci, ep);
-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-		return;
-	}
+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
 
-	xhci_invalidate_cancelled_tds(ep, &deq_state);
+	trace_xhci_handle_cmd_stop_ep(ep_ctx);
 
+	/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
+	xhci_invalidate_cancelled_tds(ep);
 	xhci_stop_watchdog_timer_in_irq(xhci, ep);
 
-	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
-	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
-				&deq_state);
-		xhci_ring_cmd_db(xhci);
-	} else {
-		/* Otherwise ring the doorbell(s) to restart queued transfers */
-		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-	}
-
-	/*
-	 * Drop the lock and complete the URBs in the cancelled TD list.
-	 * New TDs to be cancelled might be added to the end of the list before
-	 * we can complete all the URBs for the TDs we already unlinked.
-	 * So stop when we've completed the URB for the last TD we unlinked.
-	 */
-	do {
-		cur_td = list_first_entry(&ep->cancelled_td_list,
-				struct xhci_td, cancelled_td_list);
-		list_del_init(&cur_td->cancelled_td_list);
-
-		/* Doesn't matter what we pass for status, since the core will
-		 * just overwrite it (because the URB has been unlinked).
-		 */
-		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
-		xhci_td_cleanup(xhci, cur_td, ep_ring, 0);
-
-		/* Stop processing the cancelled list if the watchdog timer is
-		 * running.
-		 */
-		if (xhci->xhc_state & XHCI_STATE_DYING)
-			return;
-	} while (cur_td != last_unlinked_td);
-
-	/* Return to the event handler with xhci->lock re-acquired */
+	/* Otherwise ring the doorbell(s) to restart queued transfers */
+	xhci_giveback_invalidated_tds(ep);
+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
@@ -1202,6 +1199,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 	struct xhci_virt_ep *ep;
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_td *td, *tmp_td;
 
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1279,7 +1277,15 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 					 ep->queued_deq_seg, ep->queued_deq_ptr);
 		}
 	}
-
+	/* HW cached TDs cleared from cache, give them back */
+	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+				 cancelled_td_list) {
+		ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+		if (td->cancel_status == TD_CLEARING_CACHE) {
+			td->cancel_status = TD_CLEARED;
+			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+		}
+	}
 cleanup:
 	ep->ep_state &= ~SET_DEQ_PENDING;
 	ep->queued_deq_seg = NULL;
@@ -1309,27 +1315,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 		"Ignoring reset ep completion code of %u", cmd_comp_code);
 
-	/* HW with the reset endpoint quirk needs to have a configure endpoint
-	 * command complete before the endpoint can be used.  Queue that here
-	 * because the HW can't handle two commands being queued in a row.
-	 */
-	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
-		struct xhci_command *command;
+	/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
+	xhci_invalidate_cancelled_tds(ep);
 
-		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
-		if (!command)
-			return;
+	if (xhci->quirks & XHCI_RESET_EP_QUIRK)
+		xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
+	/* Clear our internal halted state */
+	ep->ep_state &= ~EP_HALTED;
 
-		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-				"Queueing configure endpoint command");
-		xhci_queue_configure_endpoint(xhci, command,
-				xhci->devs[slot_id]->in_ctx->dma, slot_id,
-				false);
-		xhci_ring_cmd_db(xhci);
-	} else {
-		/* Clear our internal halted state */
-		ep->ep_state &= ~EP_HALTED;
-	}
+	xhci_giveback_invalidated_tds(ep);
 
 	/* if this was a soft reset, then restart */
 	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
@@ -2070,7 +2064,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			xhci_clear_hub_tt_buffer(xhci, td, ep);
 
 		xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
-					EP_HARD_RESET);
+					    EP_HARD_RESET);
+
+		return 0; /* xhci_handle_halted_endpoint marked td cancelled */
 	} else {
 		/* Update ring dequeue pointer */
 		ep_ring->dequeue = td->last_trb;
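
The hunks above walk td->cancel_status through a small state machine. The commit header counts three changed files, and the third one (presumably drivers/usb/host/xhci.h, not shown in this excerpt) is where the new field and its states would be declared. The sketch below is inferred from the uses above; the state names appear verbatim in the patch, but the enum tag and ordering are guesses:

/* Hypothetical declaration, inferred from the hunks above. */
enum td_cancel_status {
	TD_DIRTY = 0,		/* TD may still be cached by the HW; set in
				 * xhci_urb_dequeue() (see the xhci.c hunk below) */
	TD_HALTED,		/* endpoint halted on this TD; set in
				 * xhci_handle_halted_endpoint() */
	TD_CLEARING_CACHE,	/* a Set TR Deq command has been queued for it
				 * by xhci_invalidate_cancelled_tds() */
	TD_CLEARED,		/* TD is now a no-op, or the Set TR Deq completed;
				 * the URB is given back by
				 * xhci_giveback_invalidated_tds() or
				 * xhci_handle_cmd_set_deq() */
};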

drivers/usb/host/xhci.c

Lines changed: 6 additions & 88 deletions
@@ -1440,15 +1440,6 @@ static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
 	return 1 << (xhci_get_endpoint_index(desc) + 1);
 }
 
-/* Find the flag for this endpoint (for use in the control context). Use the
- * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
- * bit 1, etc.
- */
-static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
-{
-	return 1 << (ep_index + 1);
-}
-
 /* Compute the last valid endpoint context index. Basically, this is the
  * endpoint index plus one. For slot contexts with more than valid endpoint,
  * we find the most significant bit set in the added contexts flags.
@@ -1810,7 +1801,12 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 	for (; i < urb_priv->num_tds; i++) {
 		td = &urb_priv->td[i];
-		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+		/* TD can already be on cancelled list if ep halted on it */
+		if (list_empty(&td->cancelled_td_list)) {
+			td->cancel_status = TD_DIRTY;
+			list_add_tail(&td->cancelled_td_list,
+				      &ep->cancelled_td_list);
+		}
 	}
 
 	/* Queue a stop endpoint command, but only if this is
@@ -3119,84 +3115,6 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 }
 
-static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
-		unsigned int slot_id, unsigned int ep_index,
-		struct xhci_dequeue_state *deq_state)
-{
-	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_container_ctx *in_ctx;
-	struct xhci_ep_ctx *ep_ctx;
-	u32 added_ctxs;
-	dma_addr_t addr;
-
-	in_ctx = xhci->devs[slot_id]->in_ctx;
-	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
-	if (!ctrl_ctx) {
-		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
-				__func__);
-		return;
-	}
-
-	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
-			xhci->devs[slot_id]->out_ctx, ep_index);
-	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
-	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
-			deq_state->new_deq_ptr);
-	if (addr == 0) {
-		xhci_warn(xhci, "WARN Cannot submit config ep after "
-				"reset ep command\n");
-		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
-				deq_state->new_deq_seg,
-				deq_state->new_deq_ptr);
-		return;
-	}
-	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
-
-	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
-	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
-			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
-			added_ctxs, added_ctxs);
-}
-
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
-			       unsigned int ep_index, unsigned int stream_id,
-			       struct xhci_td *td)
-{
-	struct xhci_dequeue_state deq_state;
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-			"Cleaning up stalled endpoint ring");
-	/* We need to move the HW's dequeue pointer past this TD,
-	 * or it will attempt to resend it on the next doorbell ring.
-	 */
-	xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
-				    &deq_state);
-
-	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
-		return;
-
-	/* HW with the reset endpoint quirk will use the saved dequeue state to
-	 * issue a configure endpoint command later.
-	 */
-	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-				"Queueing new dequeue state");
-		xhci_queue_new_dequeue_state(xhci, slot_id,
-				ep_index, &deq_state);
-	} else {
-		/* Better hope no one uses the input context between now and the
-		 * reset endpoint completion!
-		 * XXX: No idea how this hardware will react when stream rings
-		 * are enabled.
-		 */
-		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-				"Setting up input context for "
-				"configure endpoint command");
-		xhci_setup_input_ctx_for_quirk(xhci, slot_id,
-				ep_index, &deq_state);
-	}
-}
-
 static void xhci_endpoint_disable(struct usb_hcd *hcd,
 		struct usb_host_endpoint *host_ep)
 {