Skip to content

Commit 525b807

Browse files
shimoday authored and davem330 committed
net: sh_eth: add support for set_ringparam/get_ringparam
This patch supports the ethtool set_ringparam() and get_ringparam() operations.

Signed-off-by: Yoshihiro Shimoda <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 91c7755 commit 525b807

File tree

2 files changed

+112
-33
lines changed

2 files changed

+112
-33
lines changed

drivers/net/ethernet/renesas/sh_eth.c

Lines changed: 106 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -782,7 +782,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
782782

783783
/* Free Rx skb ringbuffer */
784784
if (mdp->rx_skbuff) {
785-
for (i = 0; i < RX_RING_SIZE; i++) {
785+
for (i = 0; i < mdp->num_rx_ring; i++) {
786786
if (mdp->rx_skbuff[i])
787787
dev_kfree_skb(mdp->rx_skbuff[i]);
788788
}
@@ -792,7 +792,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
792792

793793
/* Free Tx skb ringbuffer */
794794
if (mdp->tx_skbuff) {
795-
for (i = 0; i < TX_RING_SIZE; i++) {
795+
for (i = 0; i < mdp->num_tx_ring; i++) {
796796
if (mdp->tx_skbuff[i])
797797
dev_kfree_skb(mdp->tx_skbuff[i]);
798798
}
@@ -809,16 +809,16 @@ static void sh_eth_ring_format(struct net_device *ndev)
809809
struct sk_buff *skb;
810810
struct sh_eth_rxdesc *rxdesc = NULL;
811811
struct sh_eth_txdesc *txdesc = NULL;
812-
int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
813-
int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
812+
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
813+
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
814814

815815
mdp->cur_rx = mdp->cur_tx = 0;
816816
mdp->dirty_rx = mdp->dirty_tx = 0;
817817

818818
memset(mdp->rx_ring, 0, rx_ringsize);
819819

820820
/* build Rx ring buffer */
821-
for (i = 0; i < RX_RING_SIZE; i++) {
821+
for (i = 0; i < mdp->num_rx_ring; i++) {
822822
/* skb */
823823
mdp->rx_skbuff[i] = NULL;
824824
skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -844,15 +844,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
844844
}
845845
}
846846

847-
mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
847+
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
848848

849849
/* Mark the last entry as wrapping the ring. */
850850
rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
851851

852852
memset(mdp->tx_ring, 0, tx_ringsize);
853853

854854
/* build Tx ring buffer */
855-
for (i = 0; i < TX_RING_SIZE; i++) {
855+
for (i = 0; i < mdp->num_tx_ring; i++) {
856856
mdp->tx_skbuff[i] = NULL;
857857
txdesc = &mdp->tx_ring[i];
858858
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -886,15 +886,15 @@ static int sh_eth_ring_init(struct net_device *ndev)
886886
mdp->rx_buf_sz += NET_IP_ALIGN;
887887

888888
/* Allocate RX and TX skb rings */
889-
mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
889+
mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
890890
GFP_KERNEL);
891891
if (!mdp->rx_skbuff) {
892892
dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
893893
ret = -ENOMEM;
894894
return ret;
895895
}
896896

897-
mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
897+
mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
898898
GFP_KERNEL);
899899
if (!mdp->tx_skbuff) {
900900
dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -903,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
903903
}
904904

905905
/* Allocate all Rx descriptors. */
906-
rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
906+
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
907907
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
908908
GFP_KERNEL);
909909

@@ -917,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
917917
mdp->dirty_rx = 0;
918918

919919
/* Allocate all Tx descriptors. */
920-
tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
920+
tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
921921
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
922922
GFP_KERNEL);
923923
if (!mdp->tx_ring) {
@@ -946,21 +946,21 @@ static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
946946
int ringsize;
947947

948948
if (mdp->rx_ring) {
949-
ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
949+
ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
950950
dma_free_coherent(NULL, ringsize, mdp->rx_ring,
951951
mdp->rx_desc_dma);
952952
mdp->rx_ring = NULL;
953953
}
954954

955955
if (mdp->tx_ring) {
956-
ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
956+
ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
957957
dma_free_coherent(NULL, ringsize, mdp->tx_ring,
958958
mdp->tx_desc_dma);
959959
mdp->tx_ring = NULL;
960960
}
961961
}
962962

963-
static int sh_eth_dev_init(struct net_device *ndev)
963+
static int sh_eth_dev_init(struct net_device *ndev, bool start)
964964
{
965965
int ret = 0;
966966
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -1008,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
10081008
RFLR);
10091009

10101010
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1011-
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1011+
if (start)
1012+
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
10121013

10131014
/* PAUSE Prohibition */
10141015
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1023,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
10231024
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
10241025

10251026
/* E-MAC Interrupt Enable register */
1026-
sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1027+
if (start)
1028+
sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
10271029

10281030
/* Set MAC address */
10291031
update_mac_address(ndev);
@@ -1036,10 +1038,12 @@ static int sh_eth_dev_init(struct net_device *ndev)
10361038
if (mdp->cd->tpauser)
10371039
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
10381040

1039-
/* Setting the Rx mode will start the Rx process. */
1040-
sh_eth_write(ndev, EDRRR_R, EDRRR);
1041+
if (start) {
1042+
/* Setting the Rx mode will start the Rx process. */
1043+
sh_eth_write(ndev, EDRRR_R, EDRRR);
10411044

1042-
netif_start_queue(ndev);
1045+
netif_start_queue(ndev);
1046+
}
10431047

10441048
out:
10451049
return ret;
@@ -1054,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
10541058
int entry = 0;
10551059

10561060
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1057-
entry = mdp->dirty_tx % TX_RING_SIZE;
1061+
entry = mdp->dirty_tx % mdp->num_tx_ring;
10581062
txdesc = &mdp->tx_ring[entry];
10591063
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
10601064
break;
@@ -1067,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
10671071
freeNum++;
10681072
}
10691073
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1070-
if (entry >= TX_RING_SIZE - 1)
1074+
if (entry >= mdp->num_tx_ring - 1)
10711075
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
10721076

10731077
ndev->stats.tx_packets++;
@@ -1082,8 +1086,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
10821086
struct sh_eth_private *mdp = netdev_priv(ndev);
10831087
struct sh_eth_rxdesc *rxdesc;
10841088

1085-
int entry = mdp->cur_rx % RX_RING_SIZE;
1086-
int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
1089+
int entry = mdp->cur_rx % mdp->num_rx_ring;
1090+
int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
10871091
struct sk_buff *skb;
10881092
u16 pkt_len = 0;
10891093
u32 desc_status;
@@ -1134,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
11341138
ndev->stats.rx_bytes += pkt_len;
11351139
}
11361140
rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1137-
entry = (++mdp->cur_rx) % RX_RING_SIZE;
1141+
entry = (++mdp->cur_rx) % mdp->num_rx_ring;
11381142
rxdesc = &mdp->rx_ring[entry];
11391143
}
11401144

11411145
/* Refill the Rx ring buffers. */
11421146
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1143-
entry = mdp->dirty_rx % RX_RING_SIZE;
1147+
entry = mdp->dirty_rx % mdp->num_rx_ring;
11441148
rxdesc = &mdp->rx_ring[entry];
11451149
/* The size of the buffer is 16 byte boundary. */
11461150
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1157,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
11571161
skb_checksum_none_assert(skb);
11581162
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
11591163
}
1160-
if (entry >= RX_RING_SIZE - 1)
1164+
if (entry >= mdp->num_rx_ring - 1)
11611165
rxdesc->status |=
11621166
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
11631167
else
@@ -1557,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
15571561
}
15581562
}
15591563

1564+
static void sh_eth_get_ringparam(struct net_device *ndev,
1565+
struct ethtool_ringparam *ring)
1566+
{
1567+
struct sh_eth_private *mdp = netdev_priv(ndev);
1568+
1569+
ring->rx_max_pending = RX_RING_MAX;
1570+
ring->tx_max_pending = TX_RING_MAX;
1571+
ring->rx_pending = mdp->num_rx_ring;
1572+
ring->tx_pending = mdp->num_tx_ring;
1573+
}
1574+
1575+
static int sh_eth_set_ringparam(struct net_device *ndev,
1576+
struct ethtool_ringparam *ring)
1577+
{
1578+
struct sh_eth_private *mdp = netdev_priv(ndev);
1579+
int ret;
1580+
1581+
if (ring->tx_pending > TX_RING_MAX ||
1582+
ring->rx_pending > RX_RING_MAX ||
1583+
ring->tx_pending < TX_RING_MIN ||
1584+
ring->rx_pending < RX_RING_MIN)
1585+
return -EINVAL;
1586+
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1587+
return -EINVAL;
1588+
1589+
if (netif_running(ndev)) {
1590+
netif_tx_disable(ndev);
1591+
/* Disable interrupts by clearing the interrupt mask. */
1592+
sh_eth_write(ndev, 0x0000, EESIPR);
1593+
/* Stop the chip's Tx and Rx processes. */
1594+
sh_eth_write(ndev, 0, EDTRR);
1595+
sh_eth_write(ndev, 0, EDRRR);
1596+
synchronize_irq(ndev->irq);
1597+
}
1598+
1599+
/* Free all the skbuffs in the Rx queue. */
1600+
sh_eth_ring_free(ndev);
1601+
/* Free DMA buffer */
1602+
sh_eth_free_dma_buffer(mdp);
1603+
1604+
/* Set new parameters */
1605+
mdp->num_rx_ring = ring->rx_pending;
1606+
mdp->num_tx_ring = ring->tx_pending;
1607+
1608+
ret = sh_eth_ring_init(ndev);
1609+
if (ret < 0) {
1610+
dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
1611+
return ret;
1612+
}
1613+
ret = sh_eth_dev_init(ndev, false);
1614+
if (ret < 0) {
1615+
dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
1616+
return ret;
1617+
}
1618+
1619+
if (netif_running(ndev)) {
1620+
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1621+
/* Setting the Rx mode will start the Rx process. */
1622+
sh_eth_write(ndev, EDRRR_R, EDRRR);
1623+
netif_wake_queue(ndev);
1624+
}
1625+
1626+
return 0;
1627+
}
1628+
15601629
static const struct ethtool_ops sh_eth_ethtool_ops = {
15611630
.get_settings = sh_eth_get_settings,
15621631
.set_settings = sh_eth_set_settings,
@@ -1567,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
15671636
.get_strings = sh_eth_get_strings,
15681637
.get_ethtool_stats = sh_eth_get_ethtool_stats,
15691638
.get_sset_count = sh_eth_get_sset_count,
1639+
.get_ringparam = sh_eth_get_ringparam,
1640+
.set_ringparam = sh_eth_set_ringparam,
15701641
};
15711642

15721643
/* network device open function */
@@ -1597,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
15971668
goto out_free_irq;
15981669

15991670
/* device init */
1600-
ret = sh_eth_dev_init(ndev);
1671+
ret = sh_eth_dev_init(ndev, true);
16011672
if (ret)
16021673
goto out_free_irq;
16031674

@@ -1631,22 +1702,22 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
16311702
ndev->stats.tx_errors++;
16321703

16331704
/* Free all the skbuffs in the Rx queue. */
1634-
for (i = 0; i < RX_RING_SIZE; i++) {
1705+
for (i = 0; i < mdp->num_rx_ring; i++) {
16351706
rxdesc = &mdp->rx_ring[i];
16361707
rxdesc->status = 0;
16371708
rxdesc->addr = 0xBADF00D0;
16381709
if (mdp->rx_skbuff[i])
16391710
dev_kfree_skb(mdp->rx_skbuff[i]);
16401711
mdp->rx_skbuff[i] = NULL;
16411712
}
1642-
for (i = 0; i < TX_RING_SIZE; i++) {
1713+
for (i = 0; i < mdp->num_tx_ring; i++) {
16431714
if (mdp->tx_skbuff[i])
16441715
dev_kfree_skb(mdp->tx_skbuff[i]);
16451716
mdp->tx_skbuff[i] = NULL;
16461717
}
16471718

16481719
/* device init */
1649-
sh_eth_dev_init(ndev);
1720+
sh_eth_dev_init(ndev, true);
16501721
}
16511722

16521723
/* Packet transmit function */
@@ -1658,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
16581729
unsigned long flags;
16591730

16601731
spin_lock_irqsave(&mdp->lock, flags);
1661-
if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
1732+
if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
16621733
if (!sh_eth_txfree(ndev)) {
16631734
if (netif_msg_tx_queued(mdp))
16641735
dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1669,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
16691740
}
16701741
spin_unlock_irqrestore(&mdp->lock, flags);
16711742

1672-
entry = mdp->cur_tx % TX_RING_SIZE;
1743+
entry = mdp->cur_tx % mdp->num_tx_ring;
16731744
mdp->tx_skbuff[entry] = skb;
16741745
txdesc = &mdp->tx_ring[entry];
16751746
/* soft swap. */
@@ -1683,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
16831754
else
16841755
txdesc->buffer_length = skb->len;
16851756

1686-
if (entry >= TX_RING_SIZE - 1)
1757+
if (entry >= mdp->num_tx_ring - 1)
16871758
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
16881759
else
16891760
txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -2313,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
23132384
ether_setup(ndev);
23142385

23152386
mdp = netdev_priv(ndev);
2387+
mdp->num_tx_ring = TX_RING_SIZE;
2388+
mdp->num_rx_ring = RX_RING_SIZE;
23162389
mdp->addr = ioremap(res->start, resource_size(res));
23172390
if (mdp->addr == NULL) {
23182391
ret = -ENOMEM;

drivers/net/ethernet/renesas/sh_eth.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,10 @@
2727
#define TX_TIMEOUT (5*HZ)
2828
#define TX_RING_SIZE 64 /* Tx ring size */
2929
#define RX_RING_SIZE 64 /* Rx ring size */
30+
#define TX_RING_MIN 64
31+
#define RX_RING_MIN 64
32+
#define TX_RING_MAX 1024
33+
#define RX_RING_MAX 1024
3034
#define ETHERSMALL 60
3135
#define PKT_BUF_SZ 1538
3236
#define SH_ETH_TSU_TIMEOUT_MS 500
@@ -701,6 +705,8 @@ struct sh_eth_private {
701705
const u16 *reg_offset;
702706
void __iomem *addr;
703707
void __iomem *tsu_addr;
708+
u32 num_rx_ring;
709+
u32 num_tx_ring;
704710
dma_addr_t rx_desc_dma;
705711
dma_addr_t tx_desc_dma;
706712
struct sh_eth_rxdesc *rx_ring;

0 commit comments

Comments
 (0)