
Commit c22a3f4

Joao Pinto authored and davem330 committed
net: stmmac: adding multiple napi mechanism
This patch adds the napi variable to the stmmac_rx_queue structure and ensures that operations like netif_queue_stopped, netif_wake_queue, netif_stop_queue, netdev_reset_queue and netdev_sent_queue are performed per queue.

Signed-off-by: Joao Pinto <[email protected]>
Tested-by: Niklas Cassel <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent ce73678 commit c22a3f4
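For context, the core pattern this commit introduces is one napi_struct embedded per RX queue, with the poll callback recovering its owning queue from the napi pointer. The sketch below is a minimal userspace C analogue of that pattern, not stmmac code; every name in it (fake_napi, fake_rx_queue, fake_poll) is invented for illustration.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for struct napi_struct and struct stmmac_rx_queue */
struct fake_napi {
        int weight;
};

struct fake_rx_queue {
        unsigned int queue_index;
        struct fake_napi napi;  /* embedded: one NAPI instance per queue */
};

/* container_of: recover the enclosing structure from a member pointer */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* The poll callback receives only the napi pointer, as in the kernel */
static int fake_poll(struct fake_napi *napi, int budget)
{
        struct fake_rx_queue *rx_q =
                container_of(napi, struct fake_rx_queue, napi);

        printf("polling queue %u, budget %d\n", rx_q->queue_index, budget);
        return 0;
}

int main(void)
{
        struct fake_rx_queue queues[2] = {
                { .queue_index = 0 }, { .queue_index = 1 },
        };

        /* each queue can be scheduled and polled independently */
        fake_poll(&queues[0].napi, 64);
        fake_poll(&queues[1].napi, 64);
        return 0;
}

This is exactly how the rewritten stmmac_poll in the diff below obtains its per-queue context with container_of(napi, struct stmmac_rx_queue, napi).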

2 files changed, 120 insertions(+), 41 deletions(-)

drivers/net/ethernet/stmicro/stmmac/stmmac.h

Lines changed: 1 addition & 2 deletions
@@ -72,6 +72,7 @@ struct stmmac_rx_queue {
         u32 rx_zeroc_thresh;
         dma_addr_t dma_rx_phy;
         u32 rx_tail_addr;
+        struct napi_struct napi ____cacheline_aligned_in_smp;
 };
 
 struct stmmac_priv {
@@ -91,8 +92,6 @@ struct stmmac_priv {
         u32 rx_riwt;
         int hwts_rx_en;
 
-        struct napi_struct napi ____cacheline_aligned_in_smp;
-
         void __iomem *ioaddr;
         struct net_device *dev;
         struct device *device;
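The per-queue napi member keeps the ____cacheline_aligned_in_smp annotation it had in stmmac_priv, so each queue's NAPI state starts on its own cache line and queues polled on different CPUs do not false-share. A hedged userspace analogue using C11 alignas; the 64-byte line size and all names are assumptions for illustration:

#include <stdalign.h>
#include <stdio.h>

#define CACHELINE 64 /* assumed cache-line size */

struct per_queue_state {
        unsigned long rx_count; /* cold bookkeeping fields */
        /* hot member touched by per-CPU polling: give it its own line */
        alignas(CACHELINE) int napi_state;
};

int main(void)
{
        /* the member alignment propagates to the whole struct */
        printf("struct alignment: %zu bytes\n",
               alignof(struct per_queue_state));
        return 0;
}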

drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

Lines changed: 119 additions & 39 deletions
@@ -138,6 +138,64 @@ static void stmmac_verify_args(void)
                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 }
 
+/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+        u32 queue;
+
+        for (queue = 0; queue < rx_queues_cnt; queue++) {
+                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+                napi_disable(&rx_q->napi);
+        }
+}
+
+/**
+ * stmmac_enable_all_queues - Enable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+{
+        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+        u32 queue;
+
+        for (queue = 0; queue < rx_queues_cnt; queue++) {
+                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+                napi_enable(&rx_q->napi);
+        }
+}
+
+/**
+ * stmmac_stop_all_queues - Stop all queues
+ * @priv: driver private structure
+ */
+static void stmmac_stop_all_queues(struct stmmac_priv *priv)
+{
+        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+        u32 queue;
+
+        for (queue = 0; queue < tx_queues_cnt; queue++)
+                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
+/**
+ * stmmac_start_all_queues - Start all queues
+ * @priv: driver private structure
+ */
+static void stmmac_start_all_queues(struct stmmac_priv *priv)
+{
+        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+        u32 queue;
+
+        for (queue = 0; queue < tx_queues_cnt; queue++)
+                netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
 /**
  * stmmac_clk_csr_set - dynamically set the MDC clock
  * @priv: driver private structure
@@ -1262,7 +1320,6 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 
         for (i = 0; i < DMA_TX_SIZE; i++) {
                 struct dma_desc *p;
-
                 if (priv->extend_desc)
                         p = &((tx_q->dma_etx + i)->basic);
                 else
@@ -1286,9 +1343,9 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
 
                 tx_q->dirty_tx = 0;
                 tx_q->cur_tx = 0;
-        }
 
-        netdev_reset_queue(priv->dev);
+                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+        }
 
         return 0;
 }
@@ -1805,13 +1862,16 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
         }
         tx_q->dirty_tx = entry;
 
-        netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
+        netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
+                                  pkts_compl, bytes_compl);
+
+        if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
+                                                                queue))) &&
+            stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
 
-        if (unlikely(netif_queue_stopped(priv->dev) &&
-            stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH)) {
                 netif_dbg(priv, tx_done, priv->dev,
                           "%s: restart transmit\n", __func__);
-                netif_wake_queue(priv->dev);
+                netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
         }
 
         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
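Two things change in this completion path: BQL accounting moves to the per-queue netdev_tx_completed_queue() (pairing with the netdev_tx_sent_queue() calls in the xmit paths further down), and the wake test now checks only the queue being cleaned. A minimal userspace sketch of that stop/wake hysteresis; the ring geometry and all names are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE   256
#define WAKE_THRESH 32 /* stand-in for STMMAC_TX_THRESH */

struct fake_tx_queue {
        unsigned int cur_tx;   /* producer index */
        unsigned int dirty_tx; /* consumer index */
        bool stopped;
};

static unsigned int tx_avail(const struct fake_tx_queue *q)
{
        return RING_SIZE - (q->cur_tx - q->dirty_tx) - 1;
}

/* Completion path: reclaim descriptors, then wake only this queue */
static void tx_clean(struct fake_tx_queue *q, unsigned int reclaimed)
{
        q->dirty_tx += reclaimed;
        if (q->stopped && tx_avail(q) > WAKE_THRESH) {
                q->stopped = false;
                printf("restart transmit on this queue\n");
        }
}

int main(void)
{
        struct fake_tx_queue q = { .cur_tx = RING_SIZE - 1, .stopped = true };

        tx_clean(&q, 64); /* frees 64 slots: queue gets woken */
        return 0;
}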
@@ -1843,7 +1903,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
         int i;
 
-        netif_stop_queue(priv->dev);
+        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
 
         stmmac_stop_tx_dma(priv, chan);
         dma_free_tx_skbufs(priv, chan);
@@ -1858,11 +1918,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
                                              (i == DMA_TX_SIZE - 1));
         tx_q->dirty_tx = 0;
         tx_q->cur_tx = 0;
-        netdev_reset_queue(priv->dev);
+        netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
         stmmac_start_tx_dma(priv, chan);
 
         priv->dev->stats.tx_errors++;
-        netif_wake_queue(priv->dev);
+        netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
 }
 
 /**
@@ -1907,12 +1967,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
         u32 chan;
 
         for (chan = 0; chan < tx_channel_count; chan++) {
+                struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+
                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
                                                       &priv->xstats, chan);
                 if (likely((status & handle_rx)) || (status & handle_tx)) {
-                        if (likely(napi_schedule_prep(&priv->napi))) {
+                        if (likely(napi_schedule_prep(&rx_q->napi))) {
                                 stmmac_disable_dma_irq(priv, chan);
-                                __napi_schedule(&priv->napi);
+                                __napi_schedule(&rx_q->napi);
                         }
                 }
 
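napi_schedule_prep() is a test-and-set guard on the instance's SCHED bit, so with one NAPI instance per queue each channel's interrupt can fire concurrently and only the caller that wins the idle-to-scheduled transition disables the DMA IRQ and schedules the poll. A userspace analogue with C11 atomics; the names are invented (the real primitive is test_and_set_bit on napi->state):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_napi {
        atomic_bool scheduled;
};

/* Returns true only for the caller that wins the idle->scheduled race */
static bool fake_schedule_prep(struct fake_napi *n)
{
        bool expected = false;

        return atomic_compare_exchange_strong(&n->scheduled, &expected, true);
}

int main(void)
{
        struct fake_napi n = { .scheduled = false };

        if (fake_schedule_prep(&n))
                printf("first caller: disable IRQ, schedule poll\n");
        if (!fake_schedule_prep(&n))
                printf("second caller: already scheduled, nothing to do\n");
        return 0;
}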
@@ -2554,8 +2616,8 @@ static int stmmac_open(struct net_device *dev)
                 }
         }
 
-        napi_enable(&priv->napi);
-        netif_start_queue(dev);
+        stmmac_enable_all_queues(priv);
+        stmmac_start_all_queues(priv);
 
         return 0;
 
@@ -2598,9 +2660,9 @@ static int stmmac_release(struct net_device *dev)
                 phy_disconnect(dev->phydev);
         }
 
-        netif_stop_queue(dev);
+        stmmac_stop_all_queues(priv);
 
-        napi_disable(&priv->napi);
+        stmmac_disable_all_queues(priv);
 
         del_timer_sync(&priv->txtimer);
 
@@ -2717,8 +2779,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
         /* Desc availability based on threshold should be enough safe */
         if (unlikely(stmmac_tx_avail(priv, queue) <
                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
-                if (!netif_queue_stopped(dev)) {
-                        netif_stop_queue(dev);
+                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+                                                                queue));
                         /* This is a hard error, log it. */
                         netdev_err(priv->dev,
                                    "%s: Tx Ring full when queue awake\n",
@@ -2798,7 +2861,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
                           __func__);
-                netif_stop_queue(dev);
+                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
         }
 
         dev->stats.tx_bytes += skb->len;
@@ -2855,7 +2918,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                 print_pkt(skb->data, skb_headlen(skb));
         }
 
-        netdev_sent_queue(dev, skb->len);
+        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
                                        queue);
@@ -2899,8 +2962,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         }
 
         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
-                if (!netif_queue_stopped(dev)) {
-                        netif_stop_queue(dev);
+                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+                                                                queue));
                         /* This is a hard error, log it. */
                         netdev_err(priv->dev,
                                    "%s: Tx Ring full when queue awake\n",
@@ -2998,7 +3062,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
                           __func__);
-                netif_stop_queue(dev);
+                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
         }
 
         dev->stats.tx_bytes += skb->len;
@@ -3061,7 +3125,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                 dma_wmb();
         }
 
-        netdev_sent_queue(dev, skb->len);
+        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
         if (priv->synopsys_id < DWMAC_CORE_4_00)
                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
@@ -3361,7 +3425,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         else
                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-                        napi_gro_receive(&priv->napi, skb);
+                        napi_gro_receive(&rx_q->napi, skb);
 
                         priv->dev->stats.rx_packets++;
                         priv->dev->stats.rx_bytes += frame_len;
@@ -3386,21 +3450,21 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
  */
 static int stmmac_poll(struct napi_struct *napi, int budget)
 {
-        struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+        struct stmmac_rx_queue *rx_q =
+                container_of(napi, struct stmmac_rx_queue, napi);
+        struct stmmac_priv *priv = rx_q->priv_data;
         u32 tx_count = priv->plat->tx_queues_to_use;
-        u32 chan = STMMAC_CHAN0;
+        u32 chan = rx_q->queue_index;
         int work_done = 0;
-        u32 queue = chan;
+        u32 queue;
 
         priv->xstats.napi_poll++;
 
         /* check all the queues */
         for (queue = 0; queue < tx_count; queue++)
                 stmmac_tx_clean(priv, queue);
 
-        queue = chan;
-
-        work_done = stmmac_rx(priv, budget, queue);
+        work_done = stmmac_rx(priv, budget, rx_q->queue_index);
         if (work_done < budget) {
                 napi_complete_done(napi, work_done);
                 stmmac_enable_dma_irq(priv, chan);
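The rewritten stmmac_poll keeps the standard budgeted NAPI shape, now per RX queue: do at most budget packets of RX work, and call napi_complete_done() plus re-enable this queue's DMA interrupt only once the ring drains below the budget. Note that work_done counts RX only; as before, each poll pass also cleans every TX queue. A minimal userspace sketch of the control flow, with all names invented:

#include <stdio.h>

static int backlog = 100; /* pretend RX ring backlog */

static int fake_rx(int limit)
{
        int done = (backlog < limit) ? backlog : limit;

        backlog -= done;
        return done;
}

/* NAPI-style poll: returns the number of packets processed */
static int fake_poll(int budget)
{
        int work_done = fake_rx(budget);

        if (work_done < budget) {
                /* ring drained: stop polling, re-arm this queue's IRQ */
                printf("complete: re-enable interrupts\n");
        }
        return work_done;
}

int main(void)
{
        int w;

        /* the core keeps polling while the full budget is consumed */
        while ((w = fake_poll(64)) == 64)
                printf("full budget used, poll again\n");
        printf("final poll processed %d packets\n", w);
        return 0;
}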
@@ -3989,11 +4053,14 @@ int stmmac_dvr_probe(struct device *device,
                      struct plat_stmmacenet_data *plat_dat,
                      struct stmmac_resources *res)
 {
-        int ret = 0;
         struct net_device *ndev = NULL;
         struct stmmac_priv *priv;
+        int ret = 0;
+        u32 queue;
 
-        ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+        ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
+                                  MTL_MAX_TX_QUEUES,
+                                  MTL_MAX_RX_QUEUES);
         if (!ndev)
                 return -ENOMEM;
 
@@ -4035,6 +4102,10 @@ int stmmac_dvr_probe(struct device *device,
         if (ret)
                 goto error_hw_init;
 
+        /* Configure real RX and TX queues */
+        ndev->real_num_rx_queues = priv->plat->rx_queues_to_use;
+        ndev->real_num_tx_queues = priv->plat->tx_queues_to_use;
+
         ndev->netdev_ops = &stmmac_netdev_ops;
 
         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -4084,7 +4155,12 @@ int stmmac_dvr_probe(struct device *device,
                             "Enable RX Mitigation via HW Watchdog Timer\n");
         }
 
-        netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
+        for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
+                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+                netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
+                               (8 * priv->plat->rx_queues_to_use));
+        }
 
         spin_lock_init(&priv->lock);
 

@@ -4129,7 +4205,11 @@ int stmmac_dvr_probe(struct device *device,
41294205
priv->hw->pcs != STMMAC_PCS_RTBI)
41304206
stmmac_mdio_unregister(ndev);
41314207
error_mdio_register:
4132-
netif_napi_del(&priv->napi);
4208+
for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4209+
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4210+
4211+
netif_napi_del(&rx_q->napi);
4212+
}
41334213
error_hw_init:
41344214
free_netdev(ndev);
41354215

@@ -4191,9 +4271,9 @@ int stmmac_suspend(struct device *dev)
41914271
spin_lock_irqsave(&priv->lock, flags);
41924272

41934273
netif_device_detach(ndev);
4194-
netif_stop_queue(ndev);
4274+
stmmac_stop_all_queues(priv);
41954275

4196-
napi_disable(&priv->napi);
4276+
stmmac_disable_all_queues(priv);
41974277

41984278
/* Stop TX/RX DMA */
41994279
stmmac_stop_all_dma(priv);
@@ -4296,9 +4376,9 @@ int stmmac_resume(struct device *dev)
42964376
stmmac_init_tx_coalesce(priv);
42974377
stmmac_set_rx_mode(ndev);
42984378

4299-
napi_enable(&priv->napi);
4379+
stmmac_enable_all_queues(priv);
43004380

4301-
netif_start_queue(ndev);
4381+
stmmac_start_all_queues(priv);
43024382

43034383
spin_unlock_irqrestore(&priv->lock, flags);
43044384
