  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
  */
 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
-					       unsigned int cycle_state, gfp_t flags)
+					       unsigned int cycle_state,
+					       unsigned int max_packet,
+					       gfp_t flags)
 {
 	struct xhci_segment *seg;
 	dma_addr_t dma;
@@ -53,6 +55,14 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
 		return NULL;
 	}
 
+	if (max_packet) {
+		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+		if (!seg->bounce_buf) {
+			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
+			kfree(seg);
+			return NULL;
+		}
+	}
 	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
 	if (cycle_state == 0) {
 		for (i = 0; i < TRBS_PER_SEGMENT; i++)
@@ -70,6 +80,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
 		seg->trbs = NULL;
 	}
+	kfree(seg->bounce_buf);
 	kfree(seg);
 }
 
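
These hunks only allocate and free the per-segment bounce buffer; the code that actually copies data through it at transfer time is not shown here. As a rough, hedged sketch of the intended pattern (the helper name and the seg->bounce_dma / seg->bounce_len bookkeeping fields are illustrative assumptions, not part of this hunk), the un-aligned tail of a TD would be copied into bounce_buf and DMA-mapped so the final TRB can point at an aligned, max_packet-sized buffer:

/*
 * Hypothetical sketch only: copy an un-aligned TD tail into the segment's
 * bounce buffer and map it for DMA. seg->bounce_dma and seg->bounce_len
 * are assumed bookkeeping fields, not introduced by the hunks above.
 */
static int xhci_bounce_td_tail(struct xhci_hcd *xhci, struct xhci_segment *seg,
			       void *tail, unsigned int tail_len,
			       unsigned int max_packet)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	if (!seg->bounce_buf || tail_len > max_packet)
		return -EINVAL;

	memcpy(seg->bounce_buf, tail, tail_len);
	seg->bounce_len = tail_len;
	seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
					 max_packet, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, seg->bounce_dma))
		return -ENOMEM;

	/* The caller would queue the last TRB with seg->bounce_dma instead of
	 * the original, un-aligned buffer address. */
	return 0;
}
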
@@ -317,11 +328,11 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 		struct xhci_segment **first, struct xhci_segment **last,
 		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, gfp_t flags)
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_segment *prev;
 
-	prev = xhci_segment_alloc(xhci, cycle_state, flags);
+	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 	if (!prev)
 		return -ENOMEM;
 	num_segs--;
@@ -330,7 +341,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 	while (num_segs > 0) {
 		struct xhci_segment *next;
 
-		next = xhci_segment_alloc(xhci, cycle_state, flags);
+		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 		if (!next) {
 			prev = *first;
 			while (prev) {
@@ -360,7 +371,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, gfp_t flags)
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_ring *ring;
 	int ret;
@@ -370,13 +381,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return NULL;
 
 	ring->num_segs = num_segs;
+	ring->bounce_buf_len = max_packet;
 	INIT_LIST_HEAD(&ring->td_list);
 	ring->type = type;
 	if (num_segs == 0)
 		return ring;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
-			&ring->last_seg, num_segs, cycle_state, type, flags);
+			&ring->last_seg, num_segs, cycle_state, type,
+			max_packet, flags);
 	if (ret)
 		goto fail;
 
@@ -470,7 +483,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			ring->num_segs : num_segs_needed;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
-			num_segs, ring->cycle_state, ring->type, flags);
+			num_segs, ring->cycle_state, ring->type,
+			ring->bounce_buf_len, flags);
 	if (ret)
 		return -ENOMEM;
 
@@ -652,7 +666,8 @@ struct xhci_ring *xhci_stream_id_to_ring(
  */
 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
-		unsigned int num_streams, gfp_t mem_flags)
+		unsigned int num_streams,
+		unsigned int max_packet, gfp_t mem_flags)
 {
 	struct xhci_stream_info *stream_info;
 	u32 cur_stream;
@@ -704,9 +719,11 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 * and add their segment DMA addresses to the radix tree.
 	 * Stream 0 is reserved.
 	 */
+
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
+			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+					mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -1003,7 +1020,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -1434,22 +1451,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		return -EINVAL;
 
 	ring_type = usb_endpoint_type(&ep->desc);
-	/* Set up the endpoint ring */
-	virt_dev->eps[ep_index].new_ring =
-		xhci_ring_alloc(xhci, 2, 1, ring_type, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring) {
-		/* Attempt to use the ring cache */
-		if (virt_dev->num_rings_cached == 0)
-			return -ENOMEM;
-		virt_dev->num_rings_cached--;
-		virt_dev->eps[ep_index].new_ring =
-			virt_dev->ring_cache[virt_dev->num_rings_cached];
-		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-					1, ring_type);
-	}
-	virt_dev->eps[ep_index].skip = false;
-	ep_ring = virt_dev->eps[ep_index].new_ring;
 
 	/*
 	 * Get values to fill the endpoint context, mostly from ep descriptor.
@@ -1479,6 +1480,23 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
 		mult = 0;
 
+	/* Set up the endpoint ring */
+	virt_dev->eps[ep_index].new_ring =
+		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+	if (!virt_dev->eps[ep_index].new_ring) {
+		/* Attempt to use the ring cache */
+		if (virt_dev->num_rings_cached == 0)
+			return -ENOMEM;
+		virt_dev->num_rings_cached--;
+		virt_dev->eps[ep_index].new_ring =
+			virt_dev->ring_cache[virt_dev->num_rings_cached];
+		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+					1, ring_type);
+	}
+	virt_dev->eps[ep_index].skip = false;
+	ep_ring = virt_dev->eps[ep_index].new_ring;
+
 	/* Fill the endpoint context */
 	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
 				      EP_INTERVAL(interval) |
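
The endpoint ring allocation block is moved below this point so that it runs after the endpoint's max packet size has been worked out and can be passed to xhci_ring_alloc(). As a hedged illustration of where that value would come from (the exact expression is not part of this hunk and the helper below is hypothetical), it is derived from the endpoint descriptor's wMaxPacketSize field:

/*
 * Illustrative only: derive the max packet size handed to xhci_ring_alloc().
 * GET_MAX_PACKET() masks off the high bits of wMaxPacketSize, which encode
 * transactions per microframe rather than the packet size itself.
 */
static unsigned int xhci_ep_max_packet(struct usb_host_endpoint *ep)
{
	return GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
}
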
@@ -2409,7 +2427,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2454,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 */
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
-						flags);
+						0, flags);
 	if (!xhci->event_ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci) < 0)
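
For reference, the allocation changes above rely on two new fields declared in xhci.h: a bounce buffer pointer in struct xhci_segment and the buffer length recorded in struct xhci_ring, so ring expansion can size new segments the same way. A minimal sketch of those declarations follows; only the members implied by this file are shown, everything else is elided, and the full patch may add further bookkeeping such as a DMA handle for the in-flight bounce copy:

struct xhci_segment {
	union xhci_trb		*trbs;
	struct xhci_segment	*next;
	dma_addr_t		dma;
	/* max_packet-sized bounce buffer; NULL for rings allocated with 0 */
	void			*bounce_buf;
};

struct xhci_ring {
	/* ... existing members elided ... */
	unsigned int		num_segs;
	/* max packet size this ring's segments were sized for; 0 means no
	 * bounce buffers (command, event and default control rings) */
	unsigned int		bounce_buf_len;
	/* ... */
};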