@@ -37,14 +37,16 @@
 
 #define MACB_RX_BUFFER_SIZE	128
 #define RX_BUFFER_MULTIPLE	64	/* bytes */
-#define RX_RING_SIZE		512 /* must be power of 2 */
-#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
+#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
+#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+				 * (bp)->rx_ring_size)
 
-#define TX_RING_SIZE		128 /* must be power of 2 */
-#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
+#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
+#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
-#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
+#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
 
 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
 			      | MACB_BIT(ISR_ROVR))
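The "must be power of 2" comment carries real weight: the ring-wrap helpers in the next hunk reduce a free-running index with index & (size - 1), which only equals index % size when the size is a power of two. A minimal standalone sketch of that arithmetic (illustration only, not driver code):

#include <assert.h>

/* Reduce a free-running index into a power-of-two ring, as the driver's
 * macb_tx_ring_wrap()/macb_rx_ring_wrap() now do with bp->*_ring_size. */
static unsigned int ring_wrap(unsigned int index, unsigned int ring_size)
{
	return index & (ring_size - 1); /* == index % ring_size only for 2^n sizes */
}

int main(void)
{
	assert(ring_wrap(513, 512) == 1);   /* wraps past the end */
	assert(ring_wrap(511, 512) == 511); /* last valid slot */
	assert(ring_wrap(96, 512) == 96);   /* identity below the size */
	return 0;
}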
@@ -67,45 +69,47 @@
 #define MACB_HALT_TIMEOUT	1230
 
 /* Ring buffer accessors */
-static unsigned int macb_tx_ring_wrap(unsigned int index)
+static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
-	return index & (TX_RING_SIZE - 1);
+	return index & (bp->tx_ring_size - 1);
 }
 
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 					  unsigned int index)
 {
-	return &queue->tx_ring[macb_tx_ring_wrap(index)];
+	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
 				       unsigned int index)
 {
-	return &queue->tx_skb[macb_tx_ring_wrap(index)];
+	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
 }
 
 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 {
 	dma_addr_t offset;
 
-	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+	offset = macb_tx_ring_wrap(queue->bp, index) *
+		 sizeof(struct macb_dma_desc);
 
 	return queue->tx_ring_dma + offset;
 }
 
-static unsigned int macb_rx_ring_wrap(unsigned int index)
+static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 {
-	return index & (RX_RING_SIZE - 1);
+	return index & (bp->rx_ring_size - 1);
 }
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
 {
-	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+	return bp->rx_buffers + bp->rx_buffer_size *
+	       macb_rx_ring_wrap(bp, index);
 }
 
 /* I/O accessors */
@@ -608,7 +612,8 @@ static void macb_tx_error_task(struct work_struct *work)
 	 */
 	if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
 		netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
-			    macb_tx_ring_wrap(tail), skb->data);
+			    macb_tx_ring_wrap(bp, tail),
+			    skb->data);
 		bp->stats.tx_packets++;
 		bp->stats.tx_bytes += skb->len;
 	}
@@ -700,7 +705,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 		/* First, update TX stats if needed */
 		if (skb) {
 			netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
-				    macb_tx_ring_wrap(tail), skb->data);
+				    macb_tx_ring_wrap(bp, tail),
+				    skb->data);
 			bp->stats.tx_packets++;
 			bp->stats.tx_bytes += skb->len;
 		}
@@ -720,7 +726,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 	queue->tx_tail = tail;
 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
-		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
 		netif_wake_subqueue(bp->dev, queue_index);
 }
 
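The wake-up test above relies on CIRC_CNT()/CIRC_SPACE() from <linux/circ_buf.h>, which also assume a power-of-two size: head and tail run freely and are masked only at use. A small userspace sketch with the new default TX size (the macro bodies mirror the kernel's; the values in the comments are worked examples, not driver output):

#include <stdio.h>

/* Same definitions as include/linux/circ_buf.h. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int size = 512;             /* DEFAULT_TX_RING_SIZE */
	unsigned int head = 700, tail = 400; /* free-running indices */

	/* 300 descriptors in flight, 211 slots free (one slot stays empty);
	 * the queue wakes once the count drops to 3 * 512 / 4 = 384. */
	printf("cnt=%u space=%u thresh=%u\n",
	       CIRC_CNT(head, tail, size),
	       CIRC_SPACE(head, tail, size),
	       3 * size / 4);               /* MACB_TX_WAKEUP_THRESH(bp) */
	return 0;
}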
@@ -731,8 +737,8 @@ static void gem_rx_refill(struct macb *bp)
 	dma_addr_t paddr;
 
 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
-			  RX_RING_SIZE) > 0) {
-		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+			  bp->rx_ring_size) > 0) {
+		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
@@ -759,7 +765,7 @@ static void gem_rx_refill(struct macb *bp)
 
 		bp->rx_skbuff[entry] = skb;
 
-		if (entry == RX_RING_SIZE - 1)
+		if (entry == bp->rx_ring_size - 1)
 			paddr |= MACB_BIT(RX_WRAP);
 		macb_set_addr(&(bp->rx_ring[entry]), paddr);
 		bp->rx_ring[entry].ctrl = 0;
@@ -813,7 +819,7 @@ static int gem_rx(struct macb *bp, int budget)
 		dma_addr_t addr;
 		bool rxused;
 
-		entry = macb_rx_ring_wrap(bp->rx_tail);
+		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
 		desc = &bp->rx_ring[entry];
 
 		/* Make hw descriptor updates visible to CPU */
@@ -895,8 +901,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	len = desc->ctrl & bp->rx_frm_len_mask;
 
 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
-		    macb_rx_ring_wrap(first_frag),
-		    macb_rx_ring_wrap(last_frag), len);
+		    macb_rx_ring_wrap(bp, first_frag),
+		    macb_rx_ring_wrap(bp, last_frag), len);
 
 	/* The ethernet header starts NET_IP_ALIGN bytes into the
 	 * first buffer. Since the header is 14 bytes, this makes the
@@ -969,12 +975,12 @@ static inline void macb_init_rx_ring(struct macb *bp)
 	int i;
 
 	addr = bp->rx_buffers_dma;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < bp->rx_ring_size; i++) {
 		bp->rx_ring[i].addr = addr;
 		bp->rx_ring[i].ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
-	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
 }
 
 static int macb_rx(struct macb *bp, int budget)
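With the ring length now in bp->rx_ring_size, the wrap marker on the final descriptor is computed at run time rather than from a constant. A standalone sketch of the init pattern above (stand-in types and bit value, not the driver's real ones):

#include <stdint.h>

#define RX_WRAP_BIT (1u << 1) /* stand-in for MACB_BIT(RX_WRAP) */

struct dma_desc { uint32_t addr; uint32_t ctrl; };

/* Point every descriptor at its buffer and flag the last entry so the
 * controller wraps, mirroring macb_init_rx_ring() with a runtime size. */
static void init_rx_ring(struct dma_desc *ring, unsigned int ring_size,
			 uint32_t buf_dma, uint32_t buf_size)
{
	unsigned int i;

	for (i = 0; i < ring_size; i++) {
		ring[i].addr = buf_dma;
		ring[i].ctrl = 0;
		buf_dma += buf_size;
	}
	ring[ring_size - 1].addr |= RX_WRAP_BIT;
}

int main(void)
{
	struct dma_desc ring[512];                  /* DEFAULT_RX_RING_SIZE */

	init_rx_ring(ring, 512, 0x10000000u, 128u); /* 128 = MACB_RX_BUFFER_SIZE */
	return (ring[511].addr & RX_WRAP_BIT) ? 0 : 1;
}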
@@ -1228,7 +1234,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 	offset = 0;
 	while (len) {
 		size = min(len, bp->max_tx_length);
-		entry = macb_tx_ring_wrap(tx_head);
+		entry = macb_tx_ring_wrap(bp, tx_head);
 		tx_skb = &queue->tx_skb[entry];
 
 		mapping = dma_map_single(&bp->pdev->dev,
@@ -1257,7 +1263,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		offset = 0;
 		while (len) {
 			size = min(len, bp->max_tx_length);
-			entry = macb_tx_ring_wrap(tx_head);
+			entry = macb_tx_ring_wrap(bp, tx_head);
 			tx_skb = &queue->tx_skb[entry];
 
 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
@@ -1295,14 +1301,14 @@ static unsigned int macb_tx_map(struct macb *bp,
 	 * to set the end of TX queue
 	 */
 	i = tx_head;
-	entry = macb_tx_ring_wrap(i);
+	entry = macb_tx_ring_wrap(bp, i);
 	ctrl = MACB_BIT(TX_USED);
 	desc = &queue->tx_ring[entry];
 	desc->ctrl = ctrl;
 
 	do {
 		i--;
-		entry = macb_tx_ring_wrap(i);
+		entry = macb_tx_ring_wrap(bp, i);
 		tx_skb = &queue->tx_skb[entry];
 		desc = &queue->tx_ring[entry];
 
@@ -1311,7 +1317,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 			ctrl |= MACB_BIT(TX_LAST);
 			eof = 0;
 		}
-		if (unlikely(entry == (TX_RING_SIZE - 1)))
+		if (unlikely(entry == (bp->tx_ring_size - 1)))
 			ctrl |= MACB_BIT(TX_WRAP);
 
 		/* Set TX buffer descriptor */
@@ -1388,7 +1394,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
+	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
+		       bp->tx_ring_size) < count) {
 		netif_stop_subqueue(dev, queue_index);
 		spin_unlock_irqrestore(&bp->lock, flags);
 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
@@ -1414,7 +1421,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
-	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
+	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
 		netif_stop_subqueue(dev, queue_index);
 
 unlock:
@@ -1453,7 +1460,7 @@ static void gem_free_rx_buffers(struct macb *bp)
 	if (!bp->rx_skbuff)
 		return;
 
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < bp->rx_ring_size; i++) {
 		skb = bp->rx_skbuff[i];
 
 		if (!skb)
@@ -1478,7 +1485,7 @@ static void macb_free_rx_buffers(struct macb *bp)
 {
 	if (bp->rx_buffers) {
 		dma_free_coherent(&bp->pdev->dev,
-				  RX_RING_SIZE * bp->rx_buffer_size,
+				  bp->rx_ring_size * bp->rx_buffer_size,
 				  bp->rx_buffers, bp->rx_buffers_dma);
 		bp->rx_buffers = NULL;
 	}
@@ -1491,7 +1498,7 @@ static void macb_free_consistent(struct macb *bp)
 
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 	if (bp->rx_ring) {
-		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
 				  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 	}
@@ -1500,7 +1507,7 @@ static void macb_free_consistent(struct macb *bp)
 		kfree(queue->tx_skb);
 		queue->tx_skb = NULL;
 		if (queue->tx_ring) {
-			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
 					  queue->tx_ring, queue->tx_ring_dma);
 			queue->tx_ring = NULL;
 		}
@@ -1511,22 +1518,22 @@ static int gem_alloc_rx_buffers(struct macb *bp)
 {
 	int size;
 
-	size = RX_RING_SIZE * sizeof(struct sk_buff *);
+	size = bp->rx_ring_size * sizeof(struct sk_buff *);
 	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
 	if (!bp->rx_skbuff)
 		return -ENOMEM;
-
-	netdev_dbg(bp->dev,
-		   "Allocated %d RX struct sk_buff entries at %p\n",
-		   RX_RING_SIZE, bp->rx_skbuff);
+	else
+		netdev_dbg(bp->dev,
+			   "Allocated %d RX struct sk_buff entries at %p\n",
+			   bp->rx_ring_size, bp->rx_skbuff);
 	return 0;
 }
 
 static int macb_alloc_rx_buffers(struct macb *bp)
 {
 	int size;
 
-	size = RX_RING_SIZE * bp->rx_buffer_size;
+	size = bp->rx_ring_size * bp->rx_buffer_size;
 	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
 					    &bp->rx_buffers_dma, GFP_KERNEL);
 	if (!bp->rx_buffers)
@@ -1545,7 +1552,7 @@ static int macb_alloc_consistent(struct macb *bp)
 	int size;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		size = TX_RING_BYTES;
+		size = TX_RING_BYTES(bp);
 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 						    &queue->tx_ring_dma,
 						    GFP_KERNEL);
@@ -1556,13 +1563,13 @@ static int macb_alloc_consistent(struct macb *bp)
 			    q, size, (unsigned long)queue->tx_ring_dma,
 			    queue->tx_ring);
 
-		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 		if (!queue->tx_skb)
 			goto out_err;
 	}
 
-	size = RX_RING_BYTES;
+	size = RX_RING_BYTES(bp);
 	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 					 &bp->rx_ring_dma, GFP_KERNEL);
 	if (!bp->rx_ring)
@@ -1588,11 +1595,11 @@ static void gem_init_rings(struct macb *bp)
 	int i;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		for (i = 0; i < TX_RING_SIZE; i++) {
-			macb_set_addr(&(queue->tx_ring[i]), 0);
+		for (i = 0; i < bp->tx_ring_size; i++) {
+			queue->tx_ring[i].addr = 0;
 			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
 		}
-		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 	}
@@ -1609,13 +1616,13 @@ static void macb_init_rings(struct macb *bp)
 
 	macb_init_rx_ring(bp);
 
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < bp->tx_ring_size; i++) {
 		bp->queues[0].tx_ring[i].addr = 0;
 		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
 	}
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
 
 	bp->rx_tail = 0;
 }
@@ -2148,8 +2155,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
 			| MACB_GREGS_VERSION;
 
-	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
-	head = macb_tx_ring_wrap(bp->queues[0].tx_head);
+	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
+	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
 
 	regs_buff[0] = macb_readl(bp, NCR);
 	regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
@@ -2419,6 +2426,9 @@ static int macb_init(struct platform_device *pdev)
 	int err;
 	u32 val;
 
+	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
+
 	/* set the queue register mapping once for all: queue0 has a special
 	 * register mapping but we don't want to test the queue index then
 	 * compute the corresponding register offset at run time.
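The last hunk seeds the new per-device fields with the defaults, so behavior is unchanged until something overrides them. Any such override has to preserve the power-of-two invariant or the mask-based wrap breaks; a hedged sketch of a guard a tuning path might use (hypothetical helper, not part of this commit; is_power_of_2() is the real helper from <linux/log2.h>):

#include <linux/log2.h>
#include <linux/errno.h>

/* Hypothetical guard, not in this commit: reject ring sizes that would
 * break the index & (size - 1) wrap arithmetic used throughout. */
static int macb_validate_ring_size(unsigned int size)
{
	return is_power_of_2(size) ? 0 : -EINVAL;
}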