Skip to content

Commit 34bf65d

Browse files
tlendacky authored and davem330 committed
amd-xgbe: Add netif_* message support to the driver
Add support for the network interface message level settings for determining whether to issue some of the driver messages. Make use of the netif_* interface where appropriate. Signed-off-by: Tom Lendacky <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 5452b2d commit 34bf65d

File tree

7 files changed

+191
-151
lines changed

7 files changed

+191
-151
lines changed

drivers/net/ethernet/amd/xgbe/xgbe-dcb.c

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
150150
tc_ets = 0;
151151
tc_ets_weight = 0;
152152
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
153-
DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
154-
ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
155-
DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
153+
netif_dbg(pdata, drv, netdev,
154+
"TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
155+
ets->tc_tx_bw[i], ets->tc_rx_bw[i],
156+
ets->tc_tsa[i]);
157+
netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
158+
ets->prio_tc[i]);
156159

157160
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
158161
(i >= pdata->hw_feat.tc_cnt))
@@ -214,8 +217,9 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
214217
{
215218
struct xgbe_prv_data *pdata = netdev_priv(netdev);
216219

217-
DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
218-
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
220+
netif_dbg(pdata, drv, netdev,
221+
"cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
222+
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
219223

220224
if (!pdata->pfc) {
221225
pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
@@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
238242

239243
static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
240244
{
245+
struct xgbe_prv_data *pdata = netdev_priv(netdev);
241246
u8 support = xgbe_dcb_getdcbx(netdev);
242247

243-
DBGPR(" DCBX=%#hhx\n", dcbx);
248+
netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
244249

245250
if (dcbx & ~support)
246251
return 1;

drivers/net/ethernet/amd/xgbe/xgbe-desc.c

Lines changed: 21 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
208208
if (!ring->rdata)
209209
return -ENOMEM;
210210

211-
DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
212-
ring->rdesc, ring->rdesc_dma, ring->rdata);
211+
netif_dbg(pdata, drv, pdata->netdev,
212+
"rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
213+
ring->rdesc, &ring->rdesc_dma, ring->rdata);
213214

214215
DBGPR("<--xgbe_init_ring\n");
215216

@@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
226227

227228
channel = pdata->channel;
228229
for (i = 0; i < pdata->channel_count; i++, channel++) {
229-
DBGPR(" %s - tx_ring:\n", channel->name);
230+
netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
231+
channel->name);
232+
230233
ret = xgbe_init_ring(pdata, channel->tx_ring,
231234
pdata->tx_desc_count);
232235
if (ret) {
@@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
235238
goto err_ring;
236239
}
237240

238-
DBGPR(" %s - rx_ring:\n", channel->name);
241+
netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
242+
channel->name);
243+
239244
ret = xgbe_init_ring(pdata, channel->rx_ring,
240245
pdata->rx_desc_count);
241246
if (ret) {
242247
netdev_alert(pdata->netdev,
243-
"error initializing Tx ring\n");
248+
"error initializing Rx ring\n");
244249
goto err_ring;
245250
}
246251
}
@@ -518,8 +523,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
518523
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
519524

520525
if (tso) {
521-
DBGPR(" TSO packet\n");
522-
523526
/* Map the TSO header */
524527
skb_dma = dma_map_single(pdata->dev, skb->data,
525528
packet->header_len, DMA_TO_DEVICE);
@@ -529,6 +532,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
529532
}
530533
rdata->skb_dma = skb_dma;
531534
rdata->skb_dma_len = packet->header_len;
535+
netif_dbg(pdata, tx_queued, pdata->netdev,
536+
"skb header: index=%u, dma=%pad, len=%u\n",
537+
cur_index, &skb_dma, packet->header_len);
532538

533539
offset = packet->header_len;
534540

@@ -550,8 +556,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
550556
}
551557
rdata->skb_dma = skb_dma;
552558
rdata->skb_dma_len = len;
553-
DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
554-
cur_index, skb_dma, len);
559+
netif_dbg(pdata, tx_queued, pdata->netdev,
560+
"skb data: index=%u, dma=%pad, len=%u\n",
561+
cur_index, &skb_dma, len);
555562

556563
datalen -= len;
557564
offset += len;
@@ -563,7 +570,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
563570
}
564571

565572
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
566-
DBGPR(" mapping frag %u\n", i);
573+
netif_dbg(pdata, tx_queued, pdata->netdev,
574+
"mapping frag %u\n", i);
567575

568576
frag = &skb_shinfo(skb)->frags[i];
569577
offset = 0;
@@ -582,8 +590,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
582590
rdata->skb_dma = skb_dma;
583591
rdata->skb_dma_len = len;
584592
rdata->mapped_as_page = 1;
585-
DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
586-
cur_index, skb_dma, len);
593+
netif_dbg(pdata, tx_queued, pdata->netdev,
594+
"skb frag: index=%u, dma=%pad, len=%u\n",
595+
cur_index, &skb_dma, len);
587596

588597
datalen -= len;
589598
offset += len;

drivers/net/ethernet/amd/xgbe/xgbe-dev.c

Lines changed: 41 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
710710
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
711711
return 0;
712712

713-
DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
713+
netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
714+
enable ? "entering" : "leaving");
714715
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
715716

716717
return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
724725
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
725726
return 0;
726727

727-
DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
728+
netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
729+
enable ? "entering" : "leaving");
728730
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
729731

730732
return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
749751
mac_addr[0] = ha->addr[4];
750752
mac_addr[1] = ha->addr[5];
751753

752-
DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
753-
*mac_reg);
754+
netif_dbg(pdata, drv, pdata->netdev,
755+
"adding mac address %pM at %#x\n",
756+
ha->addr, *mac_reg);
754757

755758
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
756759
}
@@ -1322,15 +1325,17 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
13221325
for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
13231326
switch (ets->tc_tsa[i]) {
13241327
case IEEE_8021QAZ_TSA_STRICT:
1325-
DBGPR(" TC%u using SP\n", i);
1328+
netif_dbg(pdata, drv, pdata->netdev,
1329+
"TC%u using SP\n", i);
13261330
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
13271331
MTL_TSA_SP);
13281332
break;
13291333
case IEEE_8021QAZ_TSA_ETS:
13301334
weight = total_weight * ets->tc_tx_bw[i] / 100;
13311335
weight = clamp(weight, min_weight, total_weight);
13321336

1333-
DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
1337+
netif_dbg(pdata, drv, pdata->netdev,
1338+
"TC%u using DWRR (weight %u)\n", i, weight);
13341339
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
13351340
MTL_TSA_ETS);
13361341
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1364,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
13591364
}
13601365
mask &= 0xff;
13611366

1362-
DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
1367+
netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
1368+
tc, mask);
13631369
reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
13641370
reg_val = XGMAC_IOREAD(pdata, reg);
13651371

@@ -1457,8 +1463,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
14571463
/* Create a context descriptor if this is a TSO packet */
14581464
if (tso_context || vlan_context) {
14591465
if (tso_context) {
1460-
DBGPR(" TSO context descriptor, mss=%u\n",
1461-
packet->mss);
1466+
netif_dbg(pdata, tx_queued, pdata->netdev,
1467+
"TSO context descriptor, mss=%u\n",
1468+
packet->mss);
14621469

14631470
/* Set the MSS size */
14641471
XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1483,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
14761483
}
14771484

14781485
if (vlan_context) {
1479-
DBGPR(" VLAN context descriptor, ctag=%u\n",
1480-
packet->vlan_ctag);
1486+
netif_dbg(pdata, tx_queued, pdata->netdev,
1487+
"VLAN context descriptor, ctag=%u\n",
1488+
packet->vlan_ctag);
14811489

14821490
/* Mark it as a CONTEXT descriptor */
14831491
XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1596,9 +1604,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
15961604
rdesc = rdata->rdesc;
15971605
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
15981606

1599-
#ifdef XGMAC_ENABLE_TX_DESC_DUMP
1600-
xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
1601-
#endif
1607+
if (netif_msg_tx_queued(pdata))
1608+
xgbe_dump_tx_desc(pdata, ring, start_index,
1609+
packet->rdesc_count, 1);
16021610

16031611
/* Make sure ownership is written to the descriptor */
16041612
dma_wmb();
@@ -1640,9 +1648,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
16401648
/* Make sure descriptor fields are read after reading the OWN bit */
16411649
dma_rmb();
16421650

1643-
#ifdef XGMAC_ENABLE_RX_DESC_DUMP
1644-
xgbe_dump_rx_desc(ring, rdesc, ring->cur);
1645-
#endif
1651+
if (netif_msg_rx_status(pdata))
1652+
xgbe_dump_rx_desc(pdata, ring, ring->cur);
16461653

16471654
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
16481655
/* Timestamp Context Descriptor */
@@ -1713,7 +1720,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
17131720
/* Check for errors (only valid in last descriptor) */
17141721
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
17151722
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1716-
DBGPR(" err=%u, etlt=%#x\n", err, etlt);
1723+
netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
17171724

17181725
if (!err || !etlt) {
17191726
/* No error if err is 0 or etlt is 0 */
@@ -1724,7 +1731,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
17241731
packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
17251732
RX_NORMAL_DESC0,
17261733
OVT);
1727-
DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
1734+
netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
1735+
packet->vlan_ctag);
17281736
}
17291737
} else {
17301738
if ((etlt == 0x05) || (etlt == 0x06))
@@ -2032,9 +2040,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
20322040
for (i = 0; i < pdata->tx_q_count; i++)
20332041
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
20342042

2035-
netdev_notice(pdata->netdev,
2036-
"%d Tx hardware queues, %d byte fifo per queue\n",
2037-
pdata->tx_q_count, ((fifo_size + 1) * 256));
2043+
netif_info(pdata, drv, pdata->netdev,
2044+
"%d Tx hardware queues, %d byte fifo per queue\n",
2045+
pdata->tx_q_count, ((fifo_size + 1) * 256));
20382046
}
20392047

20402048
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2048,9 +2056,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
20482056
for (i = 0; i < pdata->rx_q_count; i++)
20492057
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
20502058

2051-
netdev_notice(pdata->netdev,
2052-
"%d Rx hardware queues, %d byte fifo per queue\n",
2053-
pdata->rx_q_count, ((fifo_size + 1) * 256));
2059+
netif_info(pdata, drv, pdata->netdev,
2060+
"%d Rx hardware queues, %d byte fifo per queue\n",
2061+
pdata->rx_q_count, ((fifo_size + 1) * 256));
20542062
}
20552063

20562064
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2069,14 +2077,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
20692077

20702078
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
20712079
for (j = 0; j < qptc; j++) {
2072-
DBGPR(" TXq%u mapped to TC%u\n", queue, i);
2080+
netif_dbg(pdata, drv, pdata->netdev,
2081+
"TXq%u mapped to TC%u\n", queue, i);
20732082
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
20742083
Q2TCMAP, i);
20752084
pdata->q2tc_map[queue++] = i;
20762085
}
20772086

20782087
if (i < qptc_extra) {
2079-
DBGPR(" TXq%u mapped to TC%u\n", queue, i);
2088+
netif_dbg(pdata, drv, pdata->netdev,
2089+
"TXq%u mapped to TC%u\n", queue, i);
20802090
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
20812091
Q2TCMAP, i);
20822092
pdata->q2tc_map[queue++] = i;
@@ -2094,13 +2104,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
20942104
for (i = 0, prio = 0; i < prio_queues;) {
20952105
mask = 0;
20962106
for (j = 0; j < ppq; j++) {
2097-
DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
2107+
netif_dbg(pdata, drv, pdata->netdev,
2108+
"PRIO%u mapped to RXq%u\n", prio, i);
20982109
mask |= (1 << prio);
20992110
pdata->prio2q_map[prio++] = i;
21002111
}
21012112

21022113
if (i < ppq_extra) {
2103-
DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
2114+
netif_dbg(pdata, drv, pdata->netdev,
2115+
"PRIO%u mapped to RXq%u\n", prio, i);
21042116
mask |= (1 << prio);
21052117
pdata->prio2q_map[prio++] = i;
21062118
}

0 commit comments

Comments (0)