@@ -782,7 +782,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
 
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
-		for (i = 0; i < RX_RING_SIZE; i++) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
 			if (mdp->rx_skbuff[i])
 				dev_kfree_skb(mdp->rx_skbuff[i]);
 		}
@@ -792,7 +792,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
 
 	/* Free Tx skb ringbuffer */
 	if (mdp->tx_skbuff) {
-		for (i = 0; i < TX_RING_SIZE; i++) {
+		for (i = 0; i < mdp->num_tx_ring; i++) {
 			if (mdp->tx_skbuff[i])
 				dev_kfree_skb(mdp->tx_skbuff[i]);
 		}
@@ -809,16 +809,16 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	struct sk_buff *skb;
 	struct sh_eth_rxdesc *rxdesc = NULL;
 	struct sh_eth_txdesc *txdesc = NULL;
-	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
-	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
+	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 
 	mdp->cur_rx = mdp->cur_tx = 0;
 	mdp->dirty_rx = mdp->dirty_tx = 0;
 
 	memset(mdp->rx_ring, 0, rx_ringsize);
 
 	/* build Rx ring buffer */
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_rx_ring; i++) {
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -844,15 +844,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		}
 	}
 
-	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
 
 	/* Mark the last entry as wrapping the ring. */
 	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
 
 	memset(mdp->tx_ring, 0, tx_ringsize);
 
 	/* build Tx ring buffer */
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_tx_ring; i++) {
 		mdp->tx_skbuff[i] = NULL;
 		txdesc = &mdp->tx_ring[i];
 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -886,15 +886,15 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->rx_buf_sz += NET_IP_ALIGN;
 
 	/* Allocate RX and TX skb rings */
-	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
				 GFP_KERNEL);
 	if (!mdp->rx_skbuff) {
 		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
 		ret = -ENOMEM;
 		return ret;
 	}
 
-	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
				 GFP_KERNEL);
 	if (!mdp->tx_skbuff) {
 		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -903,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	}
 
 	/* Allocate all Rx descriptors. */
-	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
 	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
 
@@ -917,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->dirty_rx = 0;
 
 	/* Allocate all Tx descriptors. */
-	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
 	if (!mdp->tx_ring) {
@@ -946,21 +946,21 @@ static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
 	int ringsize;
 
 	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
 		mdp->rx_ring = NULL;
 	}
 
 	if (mdp->tx_ring) {
-		ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
 		mdp->tx_ring = NULL;
 	}
 }
 
-static int sh_eth_dev_init(struct net_device *ndev)
+static int sh_eth_dev_init(struct net_device *ndev, bool start)
 {
 	int ret = 0;
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -1008,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
		     RFLR);
 
 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	if (start)
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 
 	/* PAUSE Prohibition */
 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1023,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
 	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
 
 	/* E-MAC Interrupt Enable register */
-	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+	if (start)
+		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
 
 	/* Set MAC address */
 	update_mac_address(ndev);
@@ -1036,10 +1038,12 @@ static int sh_eth_dev_init(struct net_device *ndev)
 	if (mdp->cd->tpauser)
 		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
 
-	/* Setting the Rx mode will start the Rx process. */
-	sh_eth_write(ndev, EDRRR_R, EDRRR);
+	if (start) {
+		/* Setting the Rx mode will start the Rx process. */
+		sh_eth_write(ndev, EDRRR_R, EDRRR);
 
-	netif_start_queue(ndev);
+		netif_start_queue(ndev);
+	}
 
 out:
 	return ret;
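Note on the new flag: with `start == false`, sh_eth_dev_init() still programs the MAC and DMA registers but leaves the interrupt masks (EESIPR/ECSIPR) untouched and does not kick the Rx process or the Tx queue. The set_ringparam handler added further down relies on this, re-initializing the device between ring teardown and rebuild and re-enabling interrupts and Rx itself only if the interface was running.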
@@ -1054,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 	int entry = 0;
 
 	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % TX_RING_SIZE;
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
 		txdesc = &mdp->tx_ring[entry];
 		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
 			break;
@@ -1067,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 			freeNum++;
 		}
 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
-		if (entry >= TX_RING_SIZE - 1)
+		if (entry >= mdp->num_tx_ring - 1)
 			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
 
 		ndev->stats.tx_packets++;
@@ -1082,8 +1086,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct sh_eth_rxdesc *rxdesc;
 
-	int entry = mdp->cur_rx % RX_RING_SIZE;
-	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+	int entry = mdp->cur_rx % mdp->num_rx_ring;
+	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
 	struct sk_buff *skb;
 	u16 pkt_len = 0;
 	u32 desc_status;
@@ -1134,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 			ndev->stats.rx_bytes += pkt_len;
 		}
 		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
-		entry = (++mdp->cur_rx) % RX_RING_SIZE;
+		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
 	}
 
 	/* Refill the Rx ring buffers. */
 	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
-		entry = mdp->dirty_rx % RX_RING_SIZE;
+		entry = mdp->dirty_rx % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
 		/* The size of the buffer is 16 byte boundary. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1157,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 			skb_checksum_none_assert(skb);
 			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 		}
-		if (entry >= RX_RING_SIZE - 1)
+		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
 				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
 		else
@@ -1557,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 	}
 }
 
+static void sh_eth_get_ringparam(struct net_device *ndev,
+				 struct ethtool_ringparam *ring)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	ring->rx_max_pending = RX_RING_MAX;
+	ring->tx_max_pending = TX_RING_MAX;
+	ring->rx_pending = mdp->num_rx_ring;
+	ring->tx_pending = mdp->num_tx_ring;
+}
+
+static int sh_eth_set_ringparam(struct net_device *ndev,
+				struct ethtool_ringparam *ring)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int ret;
+
+	if (ring->tx_pending > TX_RING_MAX ||
+	    ring->rx_pending > RX_RING_MAX ||
+	    ring->tx_pending < TX_RING_MIN ||
+	    ring->rx_pending < RX_RING_MIN)
+		return -EINVAL;
+	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (netif_running(ndev)) {
+		netif_tx_disable(ndev);
+		/* Disable interrupts by clearing the interrupt mask. */
+		sh_eth_write(ndev, 0x0000, EESIPR);
+		/* Stop the chip's Tx and Rx processes. */
+		sh_eth_write(ndev, 0, EDTRR);
+		sh_eth_write(ndev, 0, EDRRR);
+		synchronize_irq(ndev->irq);
+	}
+
+	/* Free all the skbuffs in the Rx queue. */
+	sh_eth_ring_free(ndev);
+	/* Free DMA buffer */
+	sh_eth_free_dma_buffer(mdp);
+
+	/* Set new parameters */
+	mdp->num_rx_ring = ring->rx_pending;
+	mdp->num_tx_ring = ring->tx_pending;
+
+	ret = sh_eth_ring_init(ndev);
+	if (ret < 0) {
+		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+		return ret;
+	}
+	ret = sh_eth_dev_init(ndev, false);
+	if (ret < 0) {
+		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+		return ret;
+	}
+
+	if (netif_running(ndev)) {
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+		/* Setting the Rx mode will start the Rx process. */
+		sh_eth_write(ndev, EDRRR_R, EDRRR);
+		netif_wake_queue(ndev);
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops sh_eth_ethtool_ops = {
 	.get_settings	= sh_eth_get_settings,
 	.set_settings	= sh_eth_set_settings,
@@ -1567,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
 	.get_strings	= sh_eth_get_strings,
 	.get_ethtool_stats = sh_eth_get_ethtool_stats,
 	.get_sset_count	= sh_eth_get_sset_count,
+	.get_ringparam	= sh_eth_get_ringparam,
+	.set_ringparam	= sh_eth_set_ringparam,
 };
 
 /* network device open function */
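For context, the two new ethtool_ops entries are reached through the standard SIOCETHTOOL ioctl, i.e. what `ethtool -g ethX` and `ethtool -G ethX rx N tx N` issue under the hood. A minimal userspace sketch of that path; the interface name "eth0" and the ring sizes are placeholders, not values from this patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface */
	ifr.ifr_data = (char *)&ring;

	/* Read current and maximum ring sizes (driver's get_ringparam). */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx %u/%u tx %u/%u\n", ring.rx_pending, ring.rx_max_pending,
		       ring.tx_pending, ring.tx_max_pending);

	/* Request new sizes (driver's set_ringparam does the resize). */
	ring.cmd = ETHTOOL_SRINGPARAM;
	ring.rx_pending = 128;	/* example values */
	ring.tx_pending = 128;
	if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)
		perror("ETHTOOL_SRINGPARAM");

	close(fd);
	return 0;
}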
@@ -1597,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
 		goto out_free_irq;
 
 	/* device init */
-	ret = sh_eth_dev_init(ndev);
+	ret = sh_eth_dev_init(ndev, true);
 	if (ret)
 		goto out_free_irq;
 
@@ -1631,22 +1702,22 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 	ndev->stats.tx_errors++;
 
 	/* Free all the skbuffs in the Rx queue. */
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_rx_ring; i++) {
 		rxdesc = &mdp->rx_ring[i];
 		rxdesc->status = 0;
 		rxdesc->addr = 0xBADF00D0;
 		if (mdp->rx_skbuff[i])
 			dev_kfree_skb(mdp->rx_skbuff[i]);
 		mdp->rx_skbuff[i] = NULL;
 	}
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_tx_ring; i++) {
 		if (mdp->tx_skbuff[i])
 			dev_kfree_skb(mdp->tx_skbuff[i]);
 		mdp->tx_skbuff[i] = NULL;
 	}
 
 	/* device init */
-	sh_eth_dev_init(ndev);
+	sh_eth_dev_init(ndev, true);
 }
 
 /* Packet transmit function */
@@ -1658,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&mdp->lock, flags);
-	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
 		if (!sh_eth_txfree(ndev)) {
 			if (netif_msg_tx_queued(mdp))
 				dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1669,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
-	entry = mdp->cur_tx % TX_RING_SIZE;
+	entry = mdp->cur_tx % mdp->num_tx_ring;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
 	/* soft swap. */
@@ -1683,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	else
 		txdesc->buffer_length = skb->len;
 
-	if (entry >= TX_RING_SIZE - 1)
+	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
 	else
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -2313,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	ether_setup(ndev);
 
 	mdp = netdev_priv(ndev);
+	mdp->num_tx_ring = TX_RING_SIZE;
+	mdp->num_rx_ring = RX_RING_SIZE;
 	mdp->addr = ioremap(res->start, resource_size(res));
 	if (mdp->addr == NULL) {
 		ret = -ENOMEM;
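These hunks rely on companion changes in sh_eth.h that are not shown in this view: per-device `num_rx_ring`/`num_tx_ring` fields in struct sh_eth_private, and the RX/TX_RING_MIN/MAX bounds checked in sh_eth_set_ringparam(). A sketch of what the header is assumed to gain; the exact limit values are an assumption, not taken from this diff:

/* sh_eth.h -- assumed companion change */
#define RX_RING_MIN	64	/* assumed lower bounds */
#define TX_RING_MIN	64
#define RX_RING_MAX	1024	/* assumed upper bounds */
#define TX_RING_MAX	1024

/* New fields in struct sh_eth_private, replacing compile-time ring sizes: */
u32 num_rx_ring;	/* runtime Rx ring length (was RX_RING_SIZE) */
u32 num_tx_ring;	/* runtime Tx ring length (was TX_RING_SIZE) */

Note that sh_eth_drv_probe() above seeds both fields from the old RX_RING_SIZE/TX_RING_SIZE constants, so behavior is unchanged until userspace asks for different sizes.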