Commit cbeaf7a

Jakub Kicinski authored and davem330 committed
nfp: bring back support for different ring counts
We used to always allocate the same number of TX and RX rings, so the
support for having r_vectors without one of the rings was dropped.
That, however, unnecessarily limits us to 8 TX rings (8 is the Linux
RSS default) most of the time. We are also about to add channel count
configuration via ethtool, so bring that support back. TX rings can
now default to num_online_cpus() and RX rings to
netif_get_num_default_rss_queues().

Signed-off-by: Jakub Kicinski <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent b33ae99 commit cbeaf7a
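For illustration only, a minimal userspace sketch of the ring-count selection described above. pick_ring_counts(), the fake_* helpers and the example vector count are hypothetical stand-ins for num_online_cpus(), netif_get_num_default_rss_queues() and the number of MSI-X vectors actually granted; the clamping itself mirrors nfp_net_netdev_alloc() and nfp_net_irqs_assign() in the diff below.

#include <stdio.h>

/* Stand-ins for num_online_cpus() and netif_get_num_default_rss_queues();
 * the values are made up, chosen only to exercise the clamping logic. */
static unsigned int fake_num_online_cpus(void)    { return 16; }
static unsigned int fake_default_rss_queues(void) { return 8; }

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

/* Mirrors the defaults set in nfp_net_netdev_alloc() after this patch:
 * TX rings follow the online CPU count, RX rings follow the RSS default,
 * and the r_vector count covers the larger of the two (capped at CPUs). */
static void pick_ring_counts(unsigned int max_tx, unsigned int max_rx,
                             unsigned int num_vecs_granted)
{
        unsigned int num_tx = min_u(max_tx, fake_num_online_cpus());
        unsigned int num_rx = min_u(max_rx, fake_default_rss_queues());
        unsigned int num_r_vecs = min_u(max_u(num_tx, num_rx),
                                        fake_num_online_cpus());

        /* As in nfp_net_irqs_assign(): never use more rings than vectors. */
        num_r_vecs = min_u(num_r_vecs, num_vecs_granted);
        num_rx = min_u(num_rx, num_r_vecs);
        num_tx = min_u(num_tx, num_r_vecs);

        printf("tx=%u rx=%u r_vecs=%u\n", num_tx, num_rx, num_r_vecs);
}

int main(void)
{
        pick_ring_counts(64, 64, 12);   /* e.g. 12 MSI-X vectors granted */
        return 0;
}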

File tree: 1 file changed, +71 -47 lines changed

drivers/net/ethernet/netronome/nfp/nfp_net_common.c

Lines changed: 71 additions & 47 deletions
@@ -483,13 +483,13 @@ static void nfp_net_irqs_assign(struct net_device *netdev)
         struct nfp_net_r_vector *r_vec;
         int r;
 
-        /* Assumes nn->num_tx_rings == nn->num_rx_rings */
-        if (nn->num_tx_rings > nn->num_r_vecs) {
-                nn_warn(nn, "More rings (%d) than vectors (%d).\n",
-                        nn->num_tx_rings, nn->num_r_vecs);
-                nn->num_tx_rings = nn->num_r_vecs;
-                nn->num_rx_rings = nn->num_r_vecs;
-        }
+        if (nn->num_rx_rings > nn->num_r_vecs ||
+            nn->num_tx_rings > nn->num_r_vecs)
+                nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
+                        nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+
+        nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
+        nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
 
         nn->lsc_handler = nfp_net_irq_lsc;
         nn->exn_handler = nfp_net_irq_exn;
@@ -1491,11 +1491,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
 {
         struct nfp_net_r_vector *r_vec =
                 container_of(napi, struct nfp_net_r_vector, napi);
-        unsigned int pkts_polled;
-
-        nfp_net_tx_complete(r_vec->tx_ring);
+        unsigned int pkts_polled = 0;
 
-        pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
+        if (r_vec->tx_ring)
+                nfp_net_tx_complete(r_vec->tx_ring);
+        if (r_vec->rx_ring)
+                pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
 
         if (pkts_polled < budget) {
                 napi_complete_done(napi, pkts_polled);
@@ -1743,7 +1744,7 @@ nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
         if (!rings)
                 return;
 
-        for (r = 0; r < nn->num_r_vecs; r++) {
+        for (r = 0; r < nn->num_rx_rings; r++) {
                 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
                 nfp_net_rx_ring_free(&rings[r]);
         }
@@ -1758,11 +1759,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
         struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
         int err;
 
-        r_vec->tx_ring = &nn->tx_rings[idx];
-        nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
+        if (idx < nn->num_tx_rings) {
+                r_vec->tx_ring = &nn->tx_rings[idx];
+                nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
+        } else {
+                r_vec->tx_ring = NULL;
+        }
 
-        r_vec->rx_ring = &nn->rx_rings[idx];
-        nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+        if (idx < nn->num_rx_rings) {
+                r_vec->rx_ring = &nn->rx_rings[idx];
+                nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+        } else {
+                r_vec->rx_ring = NULL;
+        }
 
         snprintf(r_vec->name, sizeof(r_vec->name),
                  "%s-rxtx-%d", nn->netdev->name, idx);
@@ -1839,13 +1848,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
         /* copy RX interrupt coalesce parameters */
         value = (nn->rx_coalesce_max_frames << 16) |
                 (factor * nn->rx_coalesce_usecs);
-        for (i = 0; i < nn->num_r_vecs; i++)
+        for (i = 0; i < nn->num_rx_rings; i++)
                 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
 
         /* copy TX interrupt coalesce parameters */
         value = (nn->tx_coalesce_max_frames << 16) |
                 (factor * nn->tx_coalesce_usecs);
-        for (i = 0; i < nn->num_r_vecs; i++)
+        for (i = 0; i < nn->num_tx_rings; i++)
                 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
 }
 
@@ -1903,27 +1912,33 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
         if (err)
                 nn_err(nn, "Could not disable device: %d\n", err);
 
-        for (r = 0; r < nn->num_r_vecs; r++) {
+        for (r = 0; r < nn->num_rx_rings; r++)
                 nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
+        for (r = 0; r < nn->num_tx_rings; r++)
                 nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
+        for (r = 0; r < nn->num_r_vecs; r++)
                 nfp_net_vec_clear_ring_data(nn, r);
-        }
 
         nn->ctrl = new_ctrl;
 }
 
 static void
-nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
-                            unsigned int idx)
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+                             struct nfp_net_rx_ring *rx_ring, unsigned int idx)
 {
         /* Write the DMA address, size and MSI-X info to the device */
-        nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
-        nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
-        nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
+        nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
+        nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
+        nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx);
+}
 
-        nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
-        nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
-        nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
+static void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+                             struct nfp_net_tx_ring *tx_ring, unsigned int idx)
+{
+        nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
+        nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
+        nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx);
 }
 
 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
@@ -1948,8 +1963,10 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
                 update |= NFP_NET_CFG_UPDATE_IRQMOD;
         }
 
-        for (r = 0; r < nn->num_r_vecs; r++)
-                nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
+        for (r = 0; r < nn->num_tx_rings; r++)
+                nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r);
+        for (r = 0; r < nn->num_rx_rings; r++)
+                nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r);
 
         nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
                   0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
@@ -1975,7 +1992,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
 
         nn->ctrl = new_ctrl;
 
-        for (r = 0; r < nn->num_r_vecs; r++)
+        for (r = 0; r < nn->num_rx_rings; r++)
                 nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
 
         /* Since reconfiguration requests while NFP is down are ignored we
@@ -2067,20 +2084,22 @@ static int nfp_net_netdev_open(struct net_device *netdev)
         for (r = 0; r < nn->num_r_vecs; r++) {
                 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
                 if (err)
-                        goto err_free_prev_vecs;
-
+                        goto err_cleanup_vec_p;
+        }
+        for (r = 0; r < nn->num_tx_rings; r++) {
                 err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
                 if (err)
-                        goto err_cleanup_vec_p;
-
+                        goto err_free_tx_ring_p;
+        }
+        for (r = 0; r < nn->num_rx_rings; r++) {
                 err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
                                             nn->fl_bufsz, nn->rxd_cnt);
                 if (err)
-                        goto err_free_tx_ring_p;
+                        goto err_flush_free_rx_ring_p;
 
                 err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
                 if (err)
-                        goto err_flush_rx_ring_p;
+                        goto err_free_rx_ring_p;
         }
 
         err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
@@ -2113,17 +2132,21 @@ static int nfp_net_netdev_open(struct net_device *netdev)
         return 0;
 
 err_free_rings:
-        r = nn->num_r_vecs;
-err_free_prev_vecs:
+        r = nn->num_rx_rings;
+err_flush_free_rx_ring_p:
         while (r--) {
                 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
-err_flush_rx_ring_p:
+err_free_rx_ring_p:
                 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+        }
+        r = nn->num_tx_rings;
 err_free_tx_ring_p:
+        while (r--)
                 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+        r = nn->num_r_vecs;
 err_cleanup_vec_p:
+        while (r--)
                 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
-        }
         kfree(nn->tx_rings);
 err_free_rx_rings:
         kfree(nn->rx_rings);
@@ -2162,12 +2185,14 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
 {
         unsigned int r;
 
-        for (r = 0; r < nn->num_r_vecs; r++) {
+        for (r = 0; r < nn->num_rx_rings; r++) {
                 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
                 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+        }
+        for (r = 0; r < nn->num_tx_rings; r++)
                 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+        for (r = 0; r < nn->num_r_vecs; r++)
                 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
-        }
 
         kfree(nn->rx_rings);
         kfree(nn->tx_rings);
@@ -2686,7 +2711,6 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
 {
         struct net_device *netdev;
         struct nfp_net *nn;
-        int nqs;
 
         netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
                                     max_tx_rings, max_rx_rings);
@@ -2702,9 +2726,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
         nn->max_tx_rings = max_tx_rings;
         nn->max_rx_rings = max_rx_rings;
 
-        nqs = netif_get_num_default_rss_queues();
-        nn->num_tx_rings = min_t(int, nqs, max_tx_rings);
-        nn->num_rx_rings = min_t(int, nqs, max_rx_rings);
+        nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus());
+        nn->num_rx_rings = min_t(unsigned int, max_rx_rings,
+                                 netif_get_num_default_rss_queues());
 
         nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings);
         nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus());
