@@ -892,6 +892,73 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
	}
}

+/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+	int is_in)
+{
+	struct dma_channel	*dma;
+	struct urb		*urb;
+	void __iomem		*mbase = musb->mregs;
+	void __iomem		*epio = ep->regs;
+	struct musb_qh		*cur_qh, *next_qh;
+	u16			rx_csr, tx_csr;
+
+	musb_ep_select(mbase, ep->epnum);
+	if (is_in) {
+		dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+		/* clear nak timeout bit */
+		rx_csr = musb_readw(epio, MUSB_RXCSR);
+		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+		rx_csr &= ~MUSB_RXCSR_DATAERROR;
+		musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+		cur_qh = first_qh(&musb->in_bulk);
+	} else {
+		dma = is_dma_capable() ? ep->tx_channel : NULL;
+
+		/* clear nak timeout bit */
+		tx_csr = musb_readw(epio, MUSB_TXCSR);
+		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
+		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
+		musb_writew(epio, MUSB_TXCSR, tx_csr);
+
+		cur_qh = first_qh(&musb->out_bulk);
+	}
+	if (cur_qh) {
+		urb = next_urb(cur_qh);
+		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+			musb->dma_controller->channel_abort(dma);
+			urb->actual_length += dma->actual_len;
+			dma->actual_len = 0L;
+		}
+		musb_save_toggle(cur_qh, is_in, urb);
+
+		if (is_in) {
+			/* move cur_qh to end of queue */
+			list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+			/* get the next qh from musb->in_bulk */
+			next_qh = first_qh(&musb->in_bulk);
+
+			/* set rx_reinit and schedule the next qh */
+			ep->rx_reinit = 1;
+		} else {
+			/* move cur_qh to end of queue */
+			list_move_tail(&cur_qh->ring, &musb->out_bulk);
+
+			/* get the next qh from musb->out_bulk */
+			next_qh = first_qh(&musb->out_bulk);
+
+			/* set tx_reinit and schedule the next qh */
+			ep->tx_reinit = 1;
+		}
+		musb_start_urb(musb, is_in, next_qh);
+	}
+}

/*
 * Service the default endpoint (ep0) as host.
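A quick way to see what the new helper buys: on a NAK timeout the current queue head is pushed to the tail of the shared bulk list and the new head is restarted, so every multiplexed endpoint eventually gets a turn. The sketch below models that rotation in plain user-space C; the array and the qh ids are stand-ins for the driver's list_head ring of struct musb_qh, not real driver types.

    #include <stdio.h>

    /* Stand-in for the shared bulk queue: qh ids in scheduling order.
     * A NAK timeout rotates the head to the tail and "starts" the new
     * head, mirroring list_move_tail() + first_qh() + musb_start_urb(). */
    #define NQH 3
    static int queue[NQH] = { 0, 1, 2 };   /* hypothetical qh ids */

    static void bulk_nak_timeout(void)
    {
        int cur = queue[0];

        for (int i = 0; i < NQH - 1; i++)   /* move cur to end of queue */
            queue[i] = queue[i + 1];
        queue[NQH - 1] = cur;

        printf("qh %d timed out, scheduling qh %d next\n", cur, queue[0]);
    }

    int main(void)
    {
        bulk_nak_timeout();   /* qh 0 -> tail, qh 1 runs next */
        bulk_nak_timeout();   /* qh 1 -> tail, qh 2 runs next */
        return 0;
    }
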
@@ -1156,21 +1223,28 @@ void musb_host_tx(struct musb *musb, u8 epnum)
		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
-		dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
-
-		/* NOTE: this code path would be a good place to PAUSE a
-		 * transfer, if there's some other (nonperiodic) tx urb
-		 * that could use this fifo. (dma complicates it...)
-		 * That's already done for bulk RX transfers.
-		 *
-		 * if (bulk && qh->ring.next != &musb->out_bulk), then
-		 * we have a candidate... NAKing is *NOT* an error
-		 */
-		musb_ep_select(mbase, epnum);
-		musb_writew(epio, MUSB_TXCSR,
-				MUSB_TXCSR_H_WZC_BITS
-				| MUSB_TXCSR_TXPKTRDY);
-		return;
+		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
+				&& !list_is_singular(&musb->out_bulk)) {
+			dev_dbg(musb->controller,
+				"NAK timeout on TX%d ep\n", epnum);
+			musb_bulk_nak_timeout(musb, hw_ep, 0);
+		} else {
+			dev_dbg(musb->controller,
+				"TX end=%d device not responding\n", epnum);
+			/* NOTE: this code path would be a good place to PAUSE a
+			 * transfer, if there's some other (nonperiodic) tx urb
+			 * that could use this fifo. (dma complicates it...)
+			 * That's already done for bulk RX transfers.
+			 *
+			 * if (bulk && qh->ring.next != &musb->out_bulk), then
+			 * we have a candidate... NAKing is *NOT* an error
+			 */
+			musb_ep_select(mbase, epnum);
+			musb_writew(epio, MUSB_TXCSR,
+					MUSB_TXCSR_H_WZC_BITS
+					| MUSB_TXCSR_TXPKTRDY);
+		}
+		return;
	}

	if (status) {
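The new TX branch is guarded so the rescheduling only fires when the endpoint is genuinely shared: the transfer must be bulk, qh->mux == 1 marks the endpoint as multiplexed, and list_is_singular() rules out a list with a single QH, where rotating would accomplish nothing. A rough user-space restatement of that check, using illustrative names rather than the driver's own:

    #include <stdbool.h>

    /* Simplified doubly linked list head, standing in for struct list_head. */
    struct node {
        struct node *next, *prev;
    };

    /* Same idea as the kernel's list_is_singular(): the list is non-empty
     * and its first and last entries are the same element. */
    static bool is_singular(const struct node *head)
    {
        return head->next != head && head->next == head->prev;
    }

    /* Rotate-and-reschedule only when more than one bulk qh shares the hw ep. */
    static bool should_reschedule(bool is_bulk, int mux, const struct node *bulk_list)
    {
        return is_bulk && mux == 1 && !is_singular(bulk_list);
    }
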
@@ -1390,50 +1464,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
#endif

-/* Schedule next QH from musb->in_bulk and move the current qh to
- * the end; avoids starvation for other endpoints.
- */
-static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
-{
-	struct dma_channel	*dma;
-	struct urb		*urb;
-	void __iomem		*mbase = musb->mregs;
-	void __iomem		*epio = ep->regs;
-	struct musb_qh		*cur_qh, *next_qh;
-	u16			rx_csr;
-
-	musb_ep_select(mbase, ep->epnum);
-	dma = is_dma_capable() ? ep->rx_channel : NULL;
-
-	/* clear nak timeout bit */
-	rx_csr = musb_readw(epio, MUSB_RXCSR);
-	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
-	rx_csr &= ~MUSB_RXCSR_DATAERROR;
-	musb_writew(epio, MUSB_RXCSR, rx_csr);
-
-	cur_qh = first_qh(&musb->in_bulk);
-	if (cur_qh) {
-		urb = next_urb(cur_qh);
-		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
-			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
-			musb->dma_controller->channel_abort(dma);
-			urb->actual_length += dma->actual_len;
-			dma->actual_len = 0L;
-		}
-		musb_save_toggle(cur_qh, 1, urb);
-
-		/* move cur_qh to end of queue */
-		list_move_tail(&cur_qh->ring, &musb->in_bulk);
-
-		/* get the next qh from musb->in_bulk */
-		next_qh = first_qh(&musb->in_bulk);
-
-		/* set rx_reinit and schedule the next qh */
-		ep->rx_reinit = 1;
-		musb_start_urb(musb, 1, next_qh);
-	}
-}
-
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
@@ -1510,7 +1540,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
				if (usb_pipebulk(urb->pipe)
						&& qh->mux == 1
						&& !list_is_singular(&musb->in_bulk)) {
-					musb_bulk_rx_nak_timeout(musb, hw_ep);
+					musb_bulk_nak_timeout(musb, hw_ep, 1);
					return;
				}
				musb_ep_select(mbase, epnum);
@@ -1873,14 +1903,14 @@ static int musb_schedule(
	else
		head = &musb->out_bulk;

-	/* Enable bulk RX NAK timeout scheme when bulk requests are
+	/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
	 * multiplexed. This scheme doen't work in high speed to full
	 * speed scenario as NAK interrupts are not coming from a
	 * full speed device connected to a high speed device.
	 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
	 * 4 (8 frame or 8ms) for FS device.
	 */
-	if (is_in && qh->dev)
+	if (qh->dev)
		qh->intv_reg =
			(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
	goto success;
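As a sanity check on the interval figures quoted in the comment above, a NAKLimit-style encoding (assumed here to be 2^(m-1) frames or microframes for a register value m, which is not spelled out in this diff) reproduces the 16 ms / 8 ms numbers:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed encoding: intv_reg value m -> timeout of 2^(m-1) (micro)frames. */
        int hs_reg = 8, fs_reg = 4;
        int hs_uframes = 1 << (hs_reg - 1);     /* 128 microframes */
        int fs_frames  = 1 << (fs_reg - 1);     /* 8 frames */

        /* High speed: 125 us per microframe; full speed: 1 ms per frame. */
        printf("HS: %d uframes -> %.0f ms\n", hs_uframes, hs_uframes * 0.125); /* 16 ms */
        printf("FS: %d frames  -> %d ms\n", fs_frames, fs_frames * 1);         /* 8 ms */
        return 0;
    }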