Skip to content

Commit b410d13

Browse files
Zach Brown authored and davem330 committed
net: macb: Use variables with defaults for tx/rx ring sizes instead of hardcoded values
The macb driver hardcoded the tx/rx ring sizes. This made it impossible to change the sizes at run time. Add tx_ring_size and rx_ring_size variables to the macb object, which are initialized with default values during macb_init. Change all references to RX_RING_SIZE and TX_RING_SIZE to their respective replacements. Signed-off-by: Zach Brown <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 0f6e876 commit b410d13

File tree

2 files changed

+65
-52
lines changed

2 files changed

+65
-52
lines changed

drivers/net/ethernet/cadence/macb.c

Lines changed: 62 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -37,14 +37,16 @@
3737

3838
#define MACB_RX_BUFFER_SIZE 128
3939
#define RX_BUFFER_MULTIPLE 64 /* bytes */
40-
#define RX_RING_SIZE 512 /* must be power of 2 */
41-
#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
40+
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
41+
#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
42+
* (bp)->rx_ring_size)
4243

43-
#define TX_RING_SIZE 128 /* must be power of 2 */
44-
#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
44+
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
45+
#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
46+
* (bp)->tx_ring_size)
4547

4648
/* level of occupied TX descriptors under which we wake up TX process */
47-
#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
49+
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
4850

4951
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
5052
| MACB_BIT(ISR_ROVR))
@@ -67,45 +69,47 @@
6769
#define MACB_HALT_TIMEOUT 1230
6870

6971
/* Ring buffer accessors */
70-
static unsigned int macb_tx_ring_wrap(unsigned int index)
72+
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
7173
{
72-
return index & (TX_RING_SIZE - 1);
74+
return index & (bp->tx_ring_size - 1);
7375
}
7476

7577
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
7678
unsigned int index)
7779
{
78-
return &queue->tx_ring[macb_tx_ring_wrap(index)];
80+
return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
7981
}
8082

8183
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
8284
unsigned int index)
8385
{
84-
return &queue->tx_skb[macb_tx_ring_wrap(index)];
86+
return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
8587
}
8688

8789
static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
8890
{
8991
dma_addr_t offset;
9092

91-
offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
93+
offset = macb_tx_ring_wrap(queue->bp, index) *
94+
sizeof(struct macb_dma_desc);
9295

9396
return queue->tx_ring_dma + offset;
9497
}
9598

96-
static unsigned int macb_rx_ring_wrap(unsigned int index)
99+
static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
97100
{
98-
return index & (RX_RING_SIZE - 1);
101+
return index & (bp->rx_ring_size - 1);
99102
}
100103

101104
static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
102105
{
103-
return &bp->rx_ring[macb_rx_ring_wrap(index)];
106+
return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
104107
}
105108

106109
static void *macb_rx_buffer(struct macb *bp, unsigned int index)
107110
{
108-
return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
111+
return bp->rx_buffers + bp->rx_buffer_size *
112+
macb_rx_ring_wrap(bp, index);
109113
}
110114

111115
/* I/O accessors */
@@ -608,7 +612,8 @@ static void macb_tx_error_task(struct work_struct *work)
608612
*/
609613
if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
610614
netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
611-
macb_tx_ring_wrap(tail), skb->data);
615+
macb_tx_ring_wrap(bp, tail),
616+
skb->data);
612617
bp->stats.tx_packets++;
613618
bp->stats.tx_bytes += skb->len;
614619
}
@@ -700,7 +705,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
700705
/* First, update TX stats if needed */
701706
if (skb) {
702707
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
703-
macb_tx_ring_wrap(tail), skb->data);
708+
macb_tx_ring_wrap(bp, tail),
709+
skb->data);
704710
bp->stats.tx_packets++;
705711
bp->stats.tx_bytes += skb->len;
706712
}
@@ -720,7 +726,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
720726
queue->tx_tail = tail;
721727
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
722728
CIRC_CNT(queue->tx_head, queue->tx_tail,
723-
TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
729+
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
724730
netif_wake_subqueue(bp->dev, queue_index);
725731
}
726732

@@ -731,8 +737,8 @@ static void gem_rx_refill(struct macb *bp)
731737
dma_addr_t paddr;
732738

733739
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
734-
RX_RING_SIZE) > 0) {
735-
entry = macb_rx_ring_wrap(bp->rx_prepared_head);
740+
bp->rx_ring_size) > 0) {
741+
entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
736742

737743
/* Make hw descriptor updates visible to CPU */
738744
rmb();
@@ -759,7 +765,7 @@ static void gem_rx_refill(struct macb *bp)
759765

760766
bp->rx_skbuff[entry] = skb;
761767

762-
if (entry == RX_RING_SIZE - 1)
768+
if (entry == bp->rx_ring_size - 1)
763769
paddr |= MACB_BIT(RX_WRAP);
764770
macb_set_addr(&(bp->rx_ring[entry]), paddr);
765771
bp->rx_ring[entry].ctrl = 0;
@@ -813,7 +819,7 @@ static int gem_rx(struct macb *bp, int budget)
813819
dma_addr_t addr;
814820
bool rxused;
815821

816-
entry = macb_rx_ring_wrap(bp->rx_tail);
822+
entry = macb_rx_ring_wrap(bp, bp->rx_tail);
817823
desc = &bp->rx_ring[entry];
818824

819825
/* Make hw descriptor updates visible to CPU */
@@ -895,8 +901,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
895901
len = desc->ctrl & bp->rx_frm_len_mask;
896902

897903
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
898-
macb_rx_ring_wrap(first_frag),
899-
macb_rx_ring_wrap(last_frag), len);
904+
macb_rx_ring_wrap(bp, first_frag),
905+
macb_rx_ring_wrap(bp, last_frag), len);
900906

901907
/* The ethernet header starts NET_IP_ALIGN bytes into the
902908
* first buffer. Since the header is 14 bytes, this makes the
@@ -969,12 +975,12 @@ static inline void macb_init_rx_ring(struct macb *bp)
969975
int i;
970976

971977
addr = bp->rx_buffers_dma;
972-
for (i = 0; i < RX_RING_SIZE; i++) {
978+
for (i = 0; i < bp->rx_ring_size; i++) {
973979
bp->rx_ring[i].addr = addr;
974980
bp->rx_ring[i].ctrl = 0;
975981
addr += bp->rx_buffer_size;
976982
}
977-
bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
983+
bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
978984
}
979985

980986
static int macb_rx(struct macb *bp, int budget)
@@ -1228,7 +1234,7 @@ static unsigned int macb_tx_map(struct macb *bp,
12281234
offset = 0;
12291235
while (len) {
12301236
size = min(len, bp->max_tx_length);
1231-
entry = macb_tx_ring_wrap(tx_head);
1237+
entry = macb_tx_ring_wrap(bp, tx_head);
12321238
tx_skb = &queue->tx_skb[entry];
12331239

12341240
mapping = dma_map_single(&bp->pdev->dev,
@@ -1257,7 +1263,7 @@ static unsigned int macb_tx_map(struct macb *bp,
12571263
offset = 0;
12581264
while (len) {
12591265
size = min(len, bp->max_tx_length);
1260-
entry = macb_tx_ring_wrap(tx_head);
1266+
entry = macb_tx_ring_wrap(bp, tx_head);
12611267
tx_skb = &queue->tx_skb[entry];
12621268

12631269
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
@@ -1295,14 +1301,14 @@ static unsigned int macb_tx_map(struct macb *bp,
12951301
* to set the end of TX queue
12961302
*/
12971303
i = tx_head;
1298-
entry = macb_tx_ring_wrap(i);
1304+
entry = macb_tx_ring_wrap(bp, i);
12991305
ctrl = MACB_BIT(TX_USED);
13001306
desc = &queue->tx_ring[entry];
13011307
desc->ctrl = ctrl;
13021308

13031309
do {
13041310
i--;
1305-
entry = macb_tx_ring_wrap(i);
1311+
entry = macb_tx_ring_wrap(bp, i);
13061312
tx_skb = &queue->tx_skb[entry];
13071313
desc = &queue->tx_ring[entry];
13081314

@@ -1311,7 +1317,7 @@ static unsigned int macb_tx_map(struct macb *bp,
13111317
ctrl |= MACB_BIT(TX_LAST);
13121318
eof = 0;
13131319
}
1314-
if (unlikely(entry == (TX_RING_SIZE - 1)))
1320+
if (unlikely(entry == (bp->tx_ring_size - 1)))
13151321
ctrl |= MACB_BIT(TX_WRAP);
13161322

13171323
/* Set TX buffer descriptor */
@@ -1388,7 +1394,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
13881394
spin_lock_irqsave(&bp->lock, flags);
13891395

13901396
/* This is a hard error, log it. */
1391-
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
1397+
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
1398+
bp->tx_ring_size) < count) {
13921399
netif_stop_subqueue(dev, queue_index);
13931400
spin_unlock_irqrestore(&bp->lock, flags);
13941401
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
@@ -1414,7 +1421,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
14141421

14151422
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
14161423

1417-
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
1424+
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
14181425
netif_stop_subqueue(dev, queue_index);
14191426

14201427
unlock:
@@ -1453,7 +1460,7 @@ static void gem_free_rx_buffers(struct macb *bp)
14531460
if (!bp->rx_skbuff)
14541461
return;
14551462

1456-
for (i = 0; i < RX_RING_SIZE; i++) {
1463+
for (i = 0; i < bp->rx_ring_size; i++) {
14571464
skb = bp->rx_skbuff[i];
14581465

14591466
if (!skb)
@@ -1478,7 +1485,7 @@ static void macb_free_rx_buffers(struct macb *bp)
14781485
{
14791486
if (bp->rx_buffers) {
14801487
dma_free_coherent(&bp->pdev->dev,
1481-
RX_RING_SIZE * bp->rx_buffer_size,
1488+
bp->rx_ring_size * bp->rx_buffer_size,
14821489
bp->rx_buffers, bp->rx_buffers_dma);
14831490
bp->rx_buffers = NULL;
14841491
}
@@ -1491,7 +1498,7 @@ static void macb_free_consistent(struct macb *bp)
14911498

14921499
bp->macbgem_ops.mog_free_rx_buffers(bp);
14931500
if (bp->rx_ring) {
1494-
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1501+
dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
14951502
bp->rx_ring, bp->rx_ring_dma);
14961503
bp->rx_ring = NULL;
14971504
}
@@ -1500,7 +1507,7 @@ static void macb_free_consistent(struct macb *bp)
15001507
kfree(queue->tx_skb);
15011508
queue->tx_skb = NULL;
15021509
if (queue->tx_ring) {
1503-
dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1510+
dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
15041511
queue->tx_ring, queue->tx_ring_dma);
15051512
queue->tx_ring = NULL;
15061513
}
@@ -1511,22 +1518,22 @@ static int gem_alloc_rx_buffers(struct macb *bp)
15111518
{
15121519
int size;
15131520

1514-
size = RX_RING_SIZE * sizeof(struct sk_buff *);
1521+
size = bp->rx_ring_size * sizeof(struct sk_buff *);
15151522
bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
15161523
if (!bp->rx_skbuff)
15171524
return -ENOMEM;
1518-
1519-
netdev_dbg(bp->dev,
1520-
"Allocated %d RX struct sk_buff entries at %p\n",
1521-
RX_RING_SIZE, bp->rx_skbuff);
1525+
else
1526+
netdev_dbg(bp->dev,
1527+
"Allocated %d RX struct sk_buff entries at %p\n",
1528+
bp->rx_ring_size, bp->rx_skbuff);
15221529
return 0;
15231530
}
15241531

15251532
static int macb_alloc_rx_buffers(struct macb *bp)
15261533
{
15271534
int size;
15281535

1529-
size = RX_RING_SIZE * bp->rx_buffer_size;
1536+
size = bp->rx_ring_size * bp->rx_buffer_size;
15301537
bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
15311538
&bp->rx_buffers_dma, GFP_KERNEL);
15321539
if (!bp->rx_buffers)
@@ -1545,7 +1552,7 @@ static int macb_alloc_consistent(struct macb *bp)
15451552
int size;
15461553

15471554
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1548-
size = TX_RING_BYTES;
1555+
size = TX_RING_BYTES(bp);
15491556
queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
15501557
&queue->tx_ring_dma,
15511558
GFP_KERNEL);
@@ -1556,13 +1563,13 @@ static int macb_alloc_consistent(struct macb *bp)
15561563
q, size, (unsigned long)queue->tx_ring_dma,
15571564
queue->tx_ring);
15581565

1559-
size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
1566+
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
15601567
queue->tx_skb = kmalloc(size, GFP_KERNEL);
15611568
if (!queue->tx_skb)
15621569
goto out_err;
15631570
}
15641571

1565-
size = RX_RING_BYTES;
1572+
size = RX_RING_BYTES(bp);
15661573
bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
15671574
&bp->rx_ring_dma, GFP_KERNEL);
15681575
if (!bp->rx_ring)
@@ -1588,11 +1595,11 @@ static void gem_init_rings(struct macb *bp)
15881595
int i;
15891596

15901597
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1591-
for (i = 0; i < TX_RING_SIZE; i++) {
1592-
macb_set_addr(&(queue->tx_ring[i]), 0);
1598+
for (i = 0; i < bp->tx_ring_size; i++) {
1599+
queue->tx_ring[i].addr = 0;
15931600
queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
15941601
}
1595-
queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1602+
queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
15961603
queue->tx_head = 0;
15971604
queue->tx_tail = 0;
15981605
}
@@ -1609,13 +1616,13 @@ static void macb_init_rings(struct macb *bp)
16091616

16101617
macb_init_rx_ring(bp);
16111618

1612-
for (i = 0; i < TX_RING_SIZE; i++) {
1619+
for (i = 0; i < bp->tx_ring_size; i++) {
16131620
bp->queues[0].tx_ring[i].addr = 0;
16141621
bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
16151622
}
16161623
bp->queues[0].tx_head = 0;
16171624
bp->queues[0].tx_tail = 0;
1618-
bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1625+
bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
16191626

16201627
bp->rx_tail = 0;
16211628
}
@@ -2148,8 +2155,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
21482155
regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
21492156
| MACB_GREGS_VERSION;
21502157

2151-
tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
2152-
head = macb_tx_ring_wrap(bp->queues[0].tx_head);
2158+
tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2159+
head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
21532160

21542161
regs_buff[0] = macb_readl(bp, NCR);
21552162
regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
@@ -2419,6 +2426,9 @@ static int macb_init(struct platform_device *pdev)
24192426
int err;
24202427
u32 val;
24212428

2429+
bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
2430+
bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
2431+
24222432
/* set the queue register mapping once for all: queue0 has a special
24232433
* register mapping but we don't want to test the queue index then
24242434
* compute the corresponding register offset at run time.

drivers/net/ethernet/cadence/macb.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -811,6 +811,9 @@ struct macb {
811811
void *rx_buffers;
812812
size_t rx_buffer_size;
813813

814+
unsigned int rx_ring_size;
815+
unsigned int tx_ring_size;
816+
814817
unsigned int num_queues;
815818
unsigned int queue_mask;
816819
struct macb_queue queues[MACB_MAX_QUEUES];

0 commit comments

Comments
 (0)