@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
 		return 0;

-	DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+		  enable ? "entering" : "leaving");
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

 	return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
 		return 0;

-	DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+		  enable ? "entering" : "leaving");
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

 	return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
 		mac_addr[0] = ha->addr[4];
 		mac_addr[1] = ha->addr[5];

-		DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
-		      *mac_reg);
+		netif_dbg(pdata, drv, pdata->netdev,
+			  "adding mac address %pM at %#x\n",
+			  ha->addr, *mac_reg);

 		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
 	}
@@ -1322,15 +1325,17 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
-			DBGPR("  TC%u using SP\n", i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using SP\n", i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_SP);
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
 			weight = total_weight * ets->tc_tx_bw[i] / 100;
 			weight = clamp(weight, min_weight, total_weight);

-			DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using DWRR (weight %u)\n", i, weight);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_ETS);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1364,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
 		}
 		mask &= 0xff;

-		DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
+		netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+			  tc, mask);
 		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
 		reg_val = XGMAC_IOREAD(pdata, reg);

@@ -1457,8 +1463,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	/* Create a context descriptor if this is a TSO packet */
 	if (tso_context || vlan_context) {
 		if (tso_context) {
-			DBGPR("  TSO context descriptor, mss=%u\n",
-			      packet->mss);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "TSO context descriptor, mss=%u\n",
+				  packet->mss);

 			/* Set the MSS size */
 			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1483,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 		}

 		if (vlan_context) {
-			DBGPR("  VLAN context descriptor, ctag=%u\n",
-			      packet->vlan_ctag);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "VLAN context descriptor, ctag=%u\n",
+				  packet->vlan_ctag);

 			/* Mark it as a CONTEXT descriptor */
 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1596,9 +1604,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	rdesc = rdata->rdesc;
 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-	xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+	if (netif_msg_tx_queued(pdata))
+		xgbe_dump_tx_desc(pdata, ring, start_index,
+				  packet->rdesc_count, 1);

 	/* Make sure ownership is written to the descriptor */
 	dma_wmb();
@@ -1640,9 +1648,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	/* Make sure descriptor fields are read after reading the OWN bit */
 	dma_rmb();

-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
-	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+	if (netif_msg_rx_status(pdata))
+		xgbe_dump_rx_desc(pdata, ring, ring->cur);

 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
 		/* Timestamp Context Descriptor */
@@ -1713,7 +1720,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	/* Check for errors (only valid in last descriptor) */
 	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
 	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
-	DBGPR("  err=%u, etlt=%#x\n", err, etlt);
+	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

 	if (!err || !etlt) {
 		/* No error if err is 0 or etlt is 0 */
@@ -1724,7 +1731,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
 							      RX_NORMAL_DESC0,
 							      OVT);
-			DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
+			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+				  packet->vlan_ctag);
 		}
 	} else {
 		if ((etlt == 0x05) || (etlt == 0x06))
@@ -2032,9 +2040,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);

-	netdev_notice(pdata->netdev,
-		      "%d Tx hardware queues, %d byte fifo per queue\n",
-		      pdata->tx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Tx hardware queues, %d byte fifo per queue\n",
+		   pdata->tx_q_count, ((fifo_size + 1) * 256));
 }

 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2048,9 +2056,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);

-	netdev_notice(pdata->netdev,
-		      "%d Rx hardware queues, %d byte fifo per queue\n",
-		      pdata->rx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Rx hardware queues, %d byte fifo per queue\n",
+		   pdata->rx_q_count, ((fifo_size + 1) * 256));
 }

 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2069,14 +2077,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)

 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		for (j = 0; j < qptc; j++) {
-			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TXq%u mapped to TC%u\n", queue, i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
 					       Q2TCMAP, i);
 			pdata->q2tc_map[queue++] = i;
 		}

 		if (i < qptc_extra) {
-			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TXq%u mapped to TC%u\n", queue, i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
 					       Q2TCMAP, i);
 			pdata->q2tc_map[queue++] = i;
@@ -2094,13 +2104,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 	for (i = 0, prio = 0; i < prio_queues;) {
 		mask = 0;
 		for (j = 0; j < ppq; j++) {
-			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}

 		if (i < ppq_extra) {
-			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
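
Every hunk above follows the same pattern: the driver-private DBGPR() calls and compile-time XGMAC_ENABLE_*_DESC_DUMP #ifdefs become per-message runtime tests against the standard netdev message-level bitmask, so traces and descriptor dumps can be enabled per interface without rebuilding the driver. The sketch below illustrates the mechanism the patch switches to; the probe-time wiring and the sketch_priv struct are illustrative assumptions, not code from this patch.

#include <linux/module.h>
#include <linux/netdevice.h>

/* Hypothetical private struct; netif_dbg() and the netif_msg_*()
 * helpers only require a msg_enable field like this one.
 */
struct sketch_priv {
	struct net_device *netdev;
	u32 msg_enable;
};

static int debug = -1;	/* -1 asks netif_msg_init() for the default mask */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static void sketch_init_msg_level(struct sketch_priv *priv)
{
	/* Seed the bitmask once at probe time... */
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_LINK);

	/* ...after which each message is compiled in unconditionally and
	 * gated by a bit test: netif_msg_tx_queued(priv) is simply
	 * (priv->msg_enable & NETIF_MSG_TX_QUEUED).
	 */
	netif_dbg(priv, drv, priv->netdev, "msg_enable=%#x\n",
		  priv->msg_enable);
}

Because the mask lives in the private data, it can also be changed at runtime via "ethtool -s <dev> msglvl <bitmap>" once the driver exposes it through the get_msglevel/set_msglevel ethtool ops, which is what makes the former compile-time descriptor dumps selectable on a running system.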