* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/

+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
@@ -627,9 +629,8 @@ static int rxq_process(struct rx_queue *rxq, int budget)
if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
(RX_FIRST_DESC | RX_LAST_DESC)) {
if (net_ratelimit())
- dev_printk(KERN_ERR, &mp->dev->dev,
- "received packet spanning "
- "multiple descriptors\n");
+ netdev_err(mp->dev,
+ "received packet spanning multiple descriptors\n");
}

if (cmd_sts & ERROR_SUMMARY)
@@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)

if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
txq->tx_dropped++;
- dev_printk(KERN_DEBUG, &dev->dev,
- "failed to linearize skb with tiny "
- "unaligned fragment\n");
+ netdev_printk(KERN_DEBUG, dev,
+ "failed to linearize skb with tiny unaligned fragment\n");
return NETDEV_TX_BUSY;
}

if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
if (net_ratelimit())
- dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
+ netdev_err(dev, "tx queue full?!\n");
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
skb = __skb_dequeue(&txq->tx_skb);

if (cmd_sts & ERROR_SUMMARY) {
- dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
+ netdev_info(mp->dev, "tx error\n");
mp->dev->stats.tx_errors++;
}

@@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
int ret;

if (smi_wait_ready(msp)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+ pr_warn("SMI bus busy timeout\n");
return -ETIMEDOUT;
}

writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

if (smi_wait_ready(msp)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+ pr_warn("SMI bus busy timeout\n");
return -ETIMEDOUT;
}

ret = readl(smi_reg);
if (!(ret & SMI_READ_VALID)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
+ pr_warn("SMI bus read not valid\n");
return -ENODEV;
}

@@ -1148,15 +1148,15 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
void __iomem *smi_reg = msp->base + SMI_REG;

if (smi_wait_ready(msp)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+ pr_warn("SMI bus busy timeout\n");
return -ETIMEDOUT;
}

writel(SMI_OPCODE_WRITE | (reg << 21) |
(addr << 16) | (val & 0xffff), smi_reg);

if (smi_wait_ready(msp)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+ pr_warn("SMI bus busy timeout\n");
return -ETIMEDOUT;
}

@@ -1566,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
if (netif_running(dev)) {
mv643xx_eth_stop(dev);
if (mv643xx_eth_open(dev)) {
- dev_printk(KERN_ERR, &dev->dev,
- "fatal error on re-opening device after "
- "ring param change\n");
+ netdev_err(dev,
+ "fatal error on re-opening device after ring param change\n");
return -ENOMEM;
}
}
@@ -1874,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
}

if (rxq->rx_desc_area == NULL) {
- dev_printk(KERN_ERR, &mp->dev->dev,
+ netdev_err(mp->dev,
"can't allocate rx ring (%d bytes)\n", size);
goto out;
}
@@ -1884,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
GFP_KERNEL);
if (rxq->rx_skb == NULL) {
- dev_printk(KERN_ERR, &mp->dev->dev,
- "can't allocate rx skb ring\n");
+ netdev_err(mp->dev, "can't allocate rx skb ring\n");
goto out_free;
}

@@ -1944,8 +1942,7 @@ static void rxq_deinit(struct rx_queue *rxq)
}

if (rxq->rx_desc_count) {
- dev_printk(KERN_ERR, &mp->dev->dev,
- "error freeing rx ring -- %d skbs stuck\n",
+ netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
rxq->rx_desc_count);
}

@@ -1987,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
}

if (txq->tx_desc_area == NULL) {
- dev_printk(KERN_ERR, &mp->dev->dev,
+ netdev_err(mp->dev,
"can't allocate tx ring (%d bytes)\n", size);
return -ENOMEM;
}
@@ -2093,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
if (netif_carrier_ok(dev)) {
int i;

- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");

netif_carrier_off(dev);

@@ -2124,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

- printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
- "flow control %sabled\n", dev->name,
- speed, duplex ? "full" : "half",
- fc ? "en" : "dis");
+ netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");

if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
@@ -2337,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev)
err = request_irq(dev->irq, mv643xx_eth_irq,
IRQF_SHARED, dev->name, dev);
if (err) {
- dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ netdev_err(dev, "can't assign irq\n");
return -EAGAIN;
}

@@ -2483,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
*/
mv643xx_eth_stop(dev);
if (mv643xx_eth_open(dev)) {
- dev_printk(KERN_ERR, &dev->dev,
- "fatal error on re-opening device after "
- "MTU change\n");
+ netdev_err(dev,
+ "fatal error on re-opening device after MTU change\n");
}

return 0;
@@ -2508,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);

- dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
+ netdev_info(dev, "tx timeout\n");

schedule_work(&mp->tx_timeout_task);
}
@@ -2603,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
int ret;

if (!mv643xx_eth_version_printed++)
- printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
- "driver version %s\n", mv643xx_eth_driver_version);
+ pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
+ mv643xx_eth_driver_version);

ret = -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2871,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)

pd = pdev->dev.platform_data;
if (pd == NULL) {
- dev_printk(KERN_ERR, &pdev->dev,
- "no mv643xx_eth_platform_data\n");
+ dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
return -ENODEV;
}

if (pd->shared == NULL) {
- dev_printk(KERN_ERR, &pdev->dev,
- "no mv643xx_eth_platform_data->shared\n");
+ dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
return -ENODEV;
}

@@ -2957,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (err)
goto out;

- dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
- mp->port_num, dev->dev_addr);
+ netdev_notice(dev, "port %d with MAC address %pM\n",
+ mp->port_num, dev->dev_addr);

if (mp->tx_desc_sram_size > 0)
- dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
+ netdev_notice(dev, "configured with sram\n");

return 0;

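Note: the pr_* conversions above work together with the pr_fmt define added at the top of the file. The kernel's printk helpers paste pr_fmt() in front of every format string, so the "mv643xx_eth: " prefix that the old printk calls spelled out by hand is now added automatically via KBUILD_MODNAME. A minimal standalone sketch of that mechanism, not taken from the patch (the example function name is invented):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static void smi_timeout_example(void)
{
	/*
	 * pr_warn(fmt, ...) expands to printk(KERN_WARNING pr_fmt(fmt), ...),
	 * so with the define above this logs "mv643xx_eth: SMI bus busy timeout"
	 * (KBUILD_MODNAME is the module name, mv643xx_eth for this driver).
	 */
	pr_warn("SMI bus busy timeout\n");
}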