Skip to content

Commit f283862

Browse files
Ajay Kumar Gupta authored and Felipe Balbi committed
usb: musb: NAK timeout scheme on bulk TX endpoint
Fixes endpoint starvation issue when more than one bulk QH is multiplexed on the reserved bulk TX endpoint. This patch sets the NAK timeout interval for such QHs, and when a timeout triggers the next QH will be scheduled. This scheme doesn't work for devices which are connected to a high to full speed tree (transaction translator) as there is no NAK timeout interrupt from the musb controller from such devices. Signed-off-by: Ajay Kumar Gupta <[email protected]> Signed-off-by: Felipe Balbi <[email protected]>
1 parent 603fe2b commit f283862

File tree

1 file changed

+92
-62
lines changed

1 file changed

+92
-62
lines changed

drivers/usb/musb/musb_host.c

Lines changed: 92 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -892,6 +892,73 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
892892
}
893893
}
894894

895+
/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
896+
* the end; avoids starvation for other endpoints.
897+
*/
898+
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
899+
int is_in)
900+
{
901+
struct dma_channel *dma;
902+
struct urb *urb;
903+
void __iomem *mbase = musb->mregs;
904+
void __iomem *epio = ep->regs;
905+
struct musb_qh *cur_qh, *next_qh;
906+
u16 rx_csr, tx_csr;
907+
908+
musb_ep_select(mbase, ep->epnum);
909+
if (is_in) {
910+
dma = is_dma_capable() ? ep->rx_channel : NULL;
911+
912+
/* clear nak timeout bit */
913+
rx_csr = musb_readw(epio, MUSB_RXCSR);
914+
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
915+
rx_csr &= ~MUSB_RXCSR_DATAERROR;
916+
musb_writew(epio, MUSB_RXCSR, rx_csr);
917+
918+
cur_qh = first_qh(&musb->in_bulk);
919+
} else {
920+
dma = is_dma_capable() ? ep->tx_channel : NULL;
921+
922+
/* clear nak timeout bit */
923+
tx_csr = musb_readw(epio, MUSB_TXCSR);
924+
tx_csr |= MUSB_TXCSR_H_WZC_BITS;
925+
tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
926+
musb_writew(epio, MUSB_TXCSR, tx_csr);
927+
928+
cur_qh = first_qh(&musb->out_bulk);
929+
}
930+
if (cur_qh) {
931+
urb = next_urb(cur_qh);
932+
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
933+
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
934+
musb->dma_controller->channel_abort(dma);
935+
urb->actual_length += dma->actual_len;
936+
dma->actual_len = 0L;
937+
}
938+
musb_save_toggle(cur_qh, is_in, urb);
939+
940+
if (is_in) {
941+
/* move cur_qh to end of queue */
942+
list_move_tail(&cur_qh->ring, &musb->in_bulk);
943+
944+
/* get the next qh from musb->in_bulk */
945+
next_qh = first_qh(&musb->in_bulk);
946+
947+
/* set rx_reinit and schedule the next qh */
948+
ep->rx_reinit = 1;
949+
} else {
950+
/* move cur_qh to end of queue */
951+
list_move_tail(&cur_qh->ring, &musb->out_bulk);
952+
953+
/* get the next qh from musb->out_bulk */
954+
next_qh = first_qh(&musb->out_bulk);
955+
956+
/* set tx_reinit and schedule the next qh */
957+
ep->tx_reinit = 1;
958+
}
959+
musb_start_urb(musb, is_in, next_qh);
960+
}
961+
}
895962

896963
/*
897964
* Service the default endpoint (ep0) as host.
@@ -1156,21 +1223,28 @@ void musb_host_tx(struct musb *musb, u8 epnum)
11561223
status = -ETIMEDOUT;
11571224

11581225
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1159-
dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
1160-
1161-
/* NOTE: this code path would be a good place to PAUSE a
1162-
* transfer, if there's some other (nonperiodic) tx urb
1163-
* that could use this fifo. (dma complicates it...)
1164-
* That's already done for bulk RX transfers.
1165-
*
1166-
* if (bulk && qh->ring.next != &musb->out_bulk), then
1167-
* we have a candidate... NAKing is *NOT* an error
1168-
*/
1169-
musb_ep_select(mbase, epnum);
1170-
musb_writew(epio, MUSB_TXCSR,
1171-
MUSB_TXCSR_H_WZC_BITS
1172-
| MUSB_TXCSR_TXPKTRDY);
1173-
return;
1226+
if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
1227+
&& !list_is_singular(&musb->out_bulk)) {
1228+
dev_dbg(musb->controller,
1229+
"NAK timeout on TX%d ep\n", epnum);
1230+
musb_bulk_nak_timeout(musb, hw_ep, 0);
1231+
} else {
1232+
dev_dbg(musb->controller,
1233+
"TX end=%d device not responding\n", epnum);
1234+
/* NOTE: this code path would be a good place to PAUSE a
1235+
* transfer, if there's some other (nonperiodic) tx urb
1236+
* that could use this fifo. (dma complicates it...)
1237+
* That's already done for bulk RX transfers.
1238+
*
1239+
* if (bulk && qh->ring.next != &musb->out_bulk), then
1240+
* we have a candidate... NAKing is *NOT* an error
1241+
*/
1242+
musb_ep_select(mbase, epnum);
1243+
musb_writew(epio, MUSB_TXCSR,
1244+
MUSB_TXCSR_H_WZC_BITS
1245+
| MUSB_TXCSR_TXPKTRDY);
1246+
}
1247+
return;
11741248
}
11751249

11761250
if (status) {
@@ -1390,50 +1464,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
13901464

13911465
#endif
13921466

1393-
/* Schedule next QH from musb->in_bulk and move the current qh to
1394-
* the end; avoids starvation for other endpoints.
1395-
*/
1396-
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
1397-
{
1398-
struct dma_channel *dma;
1399-
struct urb *urb;
1400-
void __iomem *mbase = musb->mregs;
1401-
void __iomem *epio = ep->regs;
1402-
struct musb_qh *cur_qh, *next_qh;
1403-
u16 rx_csr;
1404-
1405-
musb_ep_select(mbase, ep->epnum);
1406-
dma = is_dma_capable() ? ep->rx_channel : NULL;
1407-
1408-
/* clear nak timeout bit */
1409-
rx_csr = musb_readw(epio, MUSB_RXCSR);
1410-
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1411-
rx_csr &= ~MUSB_RXCSR_DATAERROR;
1412-
musb_writew(epio, MUSB_RXCSR, rx_csr);
1413-
1414-
cur_qh = first_qh(&musb->in_bulk);
1415-
if (cur_qh) {
1416-
urb = next_urb(cur_qh);
1417-
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1418-
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1419-
musb->dma_controller->channel_abort(dma);
1420-
urb->actual_length += dma->actual_len;
1421-
dma->actual_len = 0L;
1422-
}
1423-
musb_save_toggle(cur_qh, 1, urb);
1424-
1425-
/* move cur_qh to end of queue */
1426-
list_move_tail(&cur_qh->ring, &musb->in_bulk);
1427-
1428-
/* get the next qh from musb->in_bulk */
1429-
next_qh = first_qh(&musb->in_bulk);
1430-
1431-
/* set rx_reinit and schedule the next qh */
1432-
ep->rx_reinit = 1;
1433-
musb_start_urb(musb, 1, next_qh);
1434-
}
1435-
}
1436-
14371467
/*
14381468
* Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
14391469
* and high-bandwidth IN transfer cases.
@@ -1510,7 +1540,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
15101540
if (usb_pipebulk(urb->pipe)
15111541
&& qh->mux == 1
15121542
&& !list_is_singular(&musb->in_bulk)) {
1513-
musb_bulk_rx_nak_timeout(musb, hw_ep);
1543+
musb_bulk_nak_timeout(musb, hw_ep, 1);
15141544
return;
15151545
}
15161546
musb_ep_select(mbase, epnum);
@@ -1873,14 +1903,14 @@ static int musb_schedule(
18731903
else
18741904
head = &musb->out_bulk;
18751905

1876-
/* Enable bulk RX NAK timeout scheme when bulk requests are
1906+
/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
18771907
* multiplexed. This scheme doen't work in high speed to full
18781908
* speed scenario as NAK interrupts are not coming from a
18791909
* full speed device connected to a high speed device.
18801910
* NAK timeout interval is 8 (128 uframe or 16ms) for HS and
18811911
* 4 (8 frame or 8ms) for FS device.
18821912
*/
1883-
if (is_in && qh->dev)
1913+
if (qh->dev)
18841914
qh->intv_reg =
18851915
(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
18861916
goto success;

0 commit comments

Comments
 (0)