@@ -483,13 +483,13 @@ static void nfp_net_irqs_assign(struct net_device *netdev)
 	struct nfp_net_r_vector *r_vec;
 	int r;
 
-	/* Assumes nn->num_tx_rings == nn->num_rx_rings */
-	if (nn->num_tx_rings > nn->num_r_vecs) {
-		nn_warn(nn, "More rings (%d) than vectors (%d).\n",
-			nn->num_tx_rings, nn->num_r_vecs);
-		nn->num_tx_rings = nn->num_r_vecs;
-		nn->num_rx_rings = nn->num_r_vecs;
-	}
+	if (nn->num_rx_rings > nn->num_r_vecs ||
+	    nn->num_tx_rings > nn->num_r_vecs)
+		nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
+			nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+
+	nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
+	nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
 
 	nn->lsc_handler = nfp_net_irq_lsc;
 	nn->exn_handler = nfp_net_irq_exn;
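
The hunk above drops the old assumption that nn->num_tx_rings == nn->num_rx_rings: each count is now clamped to the vector count independently, with a single combined warning. A minimal userspace sketch of the resulting behaviour, with hypothetical counts and the kernel's min() macro open-coded:

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))   /* stand-in for the kernel macro */

    int main(void)
    {
            unsigned int num_r_vecs = 8;                      /* MSI-X vectors available */
            unsigned int num_rx_rings = 16, num_tx_rings = 4; /* assumed counts */

            if (num_rx_rings > num_r_vecs || num_tx_rings > num_r_vecs)
                    printf("More rings (%u,%u) than vectors (%u).\n",
                           num_rx_rings, num_tx_rings, num_r_vecs);

            num_rx_rings = min(num_r_vecs, num_rx_rings);     /* clamped: 16 -> 8 */
            num_tx_rings = min(num_r_vecs, num_tx_rings);     /* unchanged: 4 */
            printf("rx=%u tx=%u\n", num_rx_rings, num_tx_rings);
            return 0;
    }
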
@@ -1491,11 +1491,12 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
 {
 	struct nfp_net_r_vector *r_vec =
 		container_of(napi, struct nfp_net_r_vector, napi);
-	unsigned int pkts_polled;
-
-	nfp_net_tx_complete(r_vec->tx_ring);
+	unsigned int pkts_polled = 0;
 
-	pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
+	if (r_vec->tx_ring)
+		nfp_net_tx_complete(r_vec->tx_ring);
+	if (r_vec->rx_ring)
+		pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
 
 	if (pkts_polled < budget) {
 		napi_complete_done(napi, pkts_polled);
@@ -1743,7 +1744,7 @@ nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
 	if (!rings)
 		return;
 
-	for (r = 0; r < nn->num_r_vecs; r++) {
+	for (r = 0; r < nn->num_rx_rings; r++) {
 		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
 		nfp_net_rx_ring_free(&rings[r]);
 	}
@@ -1758,11 +1759,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
 	int err;
 
-	r_vec->tx_ring = &nn->tx_rings[idx];
-	nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
+	if (idx < nn->num_tx_rings) {
+		r_vec->tx_ring = &nn->tx_rings[idx];
+		nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
+	} else {
+		r_vec->tx_ring = NULL;
+	}
 
-	r_vec->rx_ring = &nn->rx_rings[idx];
-	nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+	if (idx < nn->num_rx_rings) {
+		r_vec->rx_ring = &nn->rx_rings[idx];
+		nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+	} else {
+		r_vec->rx_ring = NULL;
+	}
 
 	snprintf(r_vec->name, sizeof(r_vec->name),
 		 "%s-rxtx-%d", nn->netdev->name, idx);
@@ -1839,13 +1848,13 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
 	/* copy RX interrupt coalesce parameters */
 	value = (nn->rx_coalesce_max_frames << 16) |
 		(factor * nn->rx_coalesce_usecs);
-	for (i = 0; i < nn->num_r_vecs; i++)
+	for (i = 0; i < nn->num_rx_rings; i++)
 		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
 
 	/* copy TX interrupt coalesce parameters */
 	value = (nn->tx_coalesce_max_frames << 16) |
 		(factor * nn->tx_coalesce_usecs);
-	for (i = 0; i < nn->num_r_vecs; i++)
+	for (i = 0; i < nn->num_tx_rings; i++)
 		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
 }
 
@@ -1903,27 +1912,33 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
 	if (err)
 		nn_err(nn, "Could not disable device: %d\n", err);
 
-	for (r = 0; r < nn->num_r_vecs; r++) {
+	for (r = 0; r < nn->num_rx_rings; r++)
 		nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
+	for (r = 0; r < nn->num_tx_rings; r++)
 		nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
+	for (r = 0; r < nn->num_r_vecs; r++)
 		nfp_net_vec_clear_ring_data(nn, r);
-	}
 
 	nn->ctrl = new_ctrl;
 }
 
 static void
-nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
-			    unsigned int idx)
+nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
+			     struct nfp_net_rx_ring *rx_ring, unsigned int idx)
 {
 	/* Write the DMA address, size and MSI-X info to the device */
-	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
-	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
-	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
+	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
+	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
+	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx);
+}
 
-	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
-	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
-	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
+static void
+nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
+			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
+{
+	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
+	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
+	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx);
 }
 
 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
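
Splitting nfp_net_vec_write_ring_data() into nfp_net_rx_ring_hw_cfg_write() and nfp_net_tx_ring_hw_cfg_write() works because each ring already carries a back-pointer to its owning vector, so the MSI-X index is read through rx_ring->r_vec->irq_idx rather than passed in via the vector. A pared-down sketch of that relationship (field types are illustrative, not the driver's exact definitions):

    struct nfp_net_r_vector;

    /* illustrative subset of the ring state the hw-config writers touch */
    struct nfp_net_rx_ring {
            unsigned long long dma;         /* DMA address of the descriptor ring */
            unsigned int cnt;               /* descriptor count, a power of two */
            struct nfp_net_r_vector *r_vec; /* back-pointer to the owning vector */
    };
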
@@ -1948,8 +1963,10 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
 		update |= NFP_NET_CFG_UPDATE_IRQMOD;
 	}
 
-	for (r = 0; r < nn->num_r_vecs; r++)
-		nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
+	for (r = 0; r < nn->num_tx_rings; r++)
+		nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r);
+	for (r = 0; r < nn->num_rx_rings; r++)
+		nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r);
 
 	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
 		  0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
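
In the context lines above, the ring-enable mask special-cases 64 rings because shifting a 64-bit value by 64 is undefined behaviour in C, so ((u64)1 << 64) - 1 cannot be used to build the all-ones mask. A sketch of the same expression with a hypothetical helper (ring_enable_mask() is not a driver function):

    #include <stdint.h>

    static uint64_t ring_enable_mask(unsigned int num_rings)
    {
            if (num_rings == 64)
                    return 0xffffffffffffffffULL;  /* all 64 ring bits set */
            return ((uint64_t)1 << num_rings) - 1; /* low num_rings bits set */
    }
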
@@ -1975,7 +1992,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
 
 	nn->ctrl = new_ctrl;
 
-	for (r = 0; r < nn->num_r_vecs; r++)
+	for (r = 0; r < nn->num_rx_rings; r++)
 		nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
 
 	/* Since reconfiguration requests while NFP is down are ignored we
@@ -2067,20 +2084,22 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 	for (r = 0; r < nn->num_r_vecs; r++) {
 		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
 		if (err)
-			goto err_free_prev_vecs;
-
+			goto err_cleanup_vec_p;
+	}
+	for (r = 0; r < nn->num_tx_rings; r++) {
 		err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
 		if (err)
-			goto err_cleanup_vec_p;
-
+			goto err_free_tx_ring_p;
+	}
+	for (r = 0; r < nn->num_rx_rings; r++) {
 		err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
 					    nn->fl_bufsz, nn->rxd_cnt);
 		if (err)
-			goto err_free_tx_ring_p;
+			goto err_flush_free_rx_ring_p;
 
 		err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
 		if (err)
-			goto err_flush_rx_ring_p;
+			goto err_free_rx_ring_p;
 	}
 
 	err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
@@ -2113,17 +2132,21 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 	return 0;
 
 err_free_rings:
-	r = nn->num_r_vecs;
-err_free_prev_vecs:
+	r = nn->num_rx_rings;
+err_flush_free_rx_ring_p:
 	while (r--) {
 		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
-err_flush_rx_ring_p:
+err_free_rx_ring_p:
 		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+	}
+	r = nn->num_tx_rings;
 err_free_tx_ring_p:
+	while (r--)
 		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+	r = nn->num_r_vecs;
 err_cleanup_vec_p:
+	while (r--)
 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
-	}
 	kfree(nn->tx_rings);
 err_free_rx_rings:
 	kfree(nn->rx_rings);
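
The reworked error path above is the standard kernel unwind idiom: before each label, r is loaded with the count of successfully set-up instances, and the label's while (r--) loop tears down exactly those, so a failure at any stage frees only what was actually allocated. A generic, self-contained sketch of the pattern (all names hypothetical):

    struct thing { void *buf; };

    extern int alloc_one(struct thing *t);  /* hypothetical; 0 on success */
    extern void free_one(struct thing *t);

    static int alloc_all(struct thing *t, unsigned int n)
    {
            unsigned int r;
            int err;

            for (r = 0; r < n; r++) {
                    err = alloc_one(&t[r]);
                    if (err)
                            goto err_unwind; /* r == index that failed */
            }
            return 0;

    err_unwind:
            while (r--)                      /* visits indices r-1 down to 0 */
                    free_one(&t[r]);
            return err;
    }
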
@@ -2162,12 +2185,14 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
 {
 	unsigned int r;
 
-	for (r = 0; r < nn->num_r_vecs; r++) {
+	for (r = 0; r < nn->num_rx_rings; r++) {
 		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
 		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+	}
+	for (r = 0; r < nn->num_tx_rings; r++)
 		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+	for (r = 0; r < nn->num_r_vecs; r++)
 		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
-	}
 
 	kfree(nn->rx_rings);
 	kfree(nn->tx_rings);
@@ -2686,7 +2711,6 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
 {
 	struct net_device *netdev;
 	struct nfp_net *nn;
-	int nqs;
 
 	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
 				    max_tx_rings, max_rx_rings);
@@ -2702,9 +2726,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
 	nn->max_tx_rings = max_tx_rings;
 	nn->max_rx_rings = max_rx_rings;
 
-	nqs = netif_get_num_default_rss_queues();
-	nn->num_tx_rings = min_t(int, nqs, max_tx_rings);
-	nn->num_rx_rings = min_t(int, nqs, max_rx_rings);
+	nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus());
+	nn->num_rx_rings = min_t(unsigned int, max_rx_rings,
+				 netif_get_num_default_rss_queues());
 
 	nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings);
 	nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus());
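
With this hunk the TX ring default follows the online CPU count while the RX default keeps following netif_get_num_default_rss_queues(); nn->num_r_vecs then takes the larger of the two, capped at the CPU count. A worked example under assumed inputs (8 online CPUs, RSS default of 8, hardware maxima of 64 rings each):

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))
    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            unsigned int max_tx_rings = 64, max_rx_rings = 64; /* hw maxima */
            unsigned int cpus = 8, rss_queues = 8;             /* assumed host */

            unsigned int num_tx_rings = min(max_tx_rings, cpus);
            unsigned int num_rx_rings = min(max_rx_rings, rss_queues);
            unsigned int num_r_vecs = min(max(num_tx_rings, num_rx_rings), cpus);

            /* prints: tx=8 rx=8 vecs=8 */
            printf("tx=%u rx=%u vecs=%u\n", num_tx_rings, num_rx_rings, num_r_vecs);
            return 0;
    }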