@@ -790,10 +790,6 @@ static int cxgb4vf_open(struct net_device *dev)
 	/*
 	 * Note that this interface is up and start everything up ...
 	 */
-	netif_set_real_num_tx_queues(dev, pi->nqsets);
-	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
-	if (err)
-		goto err_unwind;
 	err = link_start(dev);
 	if (err)
 		goto err_unwind;
@@ -2176,17 +2172,82 @@ static void cleanup_debugfs(struct adapter *adapter)
 	/* nothing to do */
 }
 
+/* Figure out how many Ports and Queue Sets we can support. This depends on
+ * knowing our Virtual Function Resources and may be called a second time if
+ * we fall back from MSI-X to MSI Interrupt Mode.
+ */
+static void size_nports_qsets(struct adapter *adapter)
+{
+	struct vf_resources *vfres = &adapter->params.vfres;
+	unsigned int ethqsets, pmask_nports;
+
+	/* The number of "ports" which we support is equal to the number of
+	 * Virtual Interfaces with which we've been provisioned.
+	 */
+	adapter->params.nports = vfres->nvi;
+	if (adapter->params.nports > MAX_NPORTS) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
+			 " allowed virtual interfaces\n", MAX_NPORTS,
+			 adapter->params.nports);
+		adapter->params.nports = MAX_NPORTS;
+	}
+
+	/* We may have been provisioned with more VIs than the number of
+	 * ports we're allowed to access (our Port Access Rights Mask).
+	 * This is obviously a configuration conflict but we don't want to
+	 * crash the kernel or anything silly just because of that.
+	 */
+	pmask_nports = hweight32(adapter->params.vfres.pmask);
+	if (pmask_nports < adapter->params.nports) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+			 " virtual interfaces; limited by Port Access Rights"
+			 " mask %#x\n", pmask_nports, adapter->params.nports,
+			 adapter->params.vfres.pmask);
+		adapter->params.nports = pmask_nports;
+	}
+
+	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
+	 * Event Queue. And if we're using MSI Interrupts, we'll also need to
+	 * reserve an Ingress Queue for a Forwarded Interrupts.
+	 *
+	 * The rest of the FL/Intr-capable ingress queues will be matched up
+	 * one-for-one with Ethernet/Control egress queues in order to form
+	 * "Queue Sets" which will be aportioned between the "ports". For
+	 * each Queue Set, we'll need the ability to allocate two Egress
+	 * Contexts -- one for the Ingress Queue Free List and one for the TX
+	 * Ethernet Queue.
+	 *
+	 * Note that even if we're currently configured to use MSI-X
+	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
+	 * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that
+	 * happens we'll need to adjust things later.
+	 */
+	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
+	if (vfres->nethctrl != ethqsets)
+		ethqsets = min(vfres->nethctrl, ethqsets);
+	if (vfres->neq < ethqsets * 2)
+		ethqsets = vfres->neq / 2;
+	if (ethqsets > MAX_ETH_QSETS)
+		ethqsets = MAX_ETH_QSETS;
+	adapter->sge.max_ethqsets = ethqsets;
+
+	if (adapter->sge.max_ethqsets < adapter->params.nports) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d available"
+			 " virtual interfaces (too few Queue Sets)\n",
+			 adapter->sge.max_ethqsets, adapter->params.nports);
+		adapter->params.nports = adapter->sge.max_ethqsets;
+	}
+}
+
 /*
  * Perform early "adapter" initialization. This is where we discover what
  * adapter parameters we're going to be using and initialize basic adapter
  * hardware support.
  */
 static int adap_init0(struct adapter *adapter)
 {
-	struct vf_resources *vfres = &adapter->params.vfres;
 	struct sge_params *sge_params = &adapter->params.sge;
 	struct sge *s = &adapter->sge;
-	unsigned int ethqsets;
 	int err;
 	u32 param, val = 0;
 
@@ -2295,69 +2356,23 @@ static int adap_init0(struct adapter *adapter)
 		return err;
 	}
 
-	/*
-	 * The number of "ports" which we support is equal to the number of
-	 * Virtual Interfaces with which we've been provisioned.
-	 */
-	adapter->params.nports = vfres->nvi;
-	if (adapter->params.nports > MAX_NPORTS) {
-		dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
-			 " virtual interfaces\n", MAX_NPORTS,
-			 adapter->params.nports);
-		adapter->params.nports = MAX_NPORTS;
-	}
-
-	/*
-	 * We need to reserve a number of the ingress queues with Free List
-	 * and Interrupt capabilities for special interrupt purposes (like
-	 * asynchronous firmware messages, or forwarded interrupts if we're
-	 * using MSI). The rest of the FL/Intr-capable ingress queues will be
-	 * matched up one-for-one with Ethernet/Control egress queues in order
-	 * to form "Queue Sets" which will be aportioned between the "ports".
-	 * For each Queue Set, we'll need the ability to allocate two Egress
-	 * Contexts -- one for the Ingress Queue Free List and one for the TX
-	 * Ethernet Queue.
-	 */
-	ethqsets = vfres->niqflint - INGQ_EXTRAS;
-	if (vfres->nethctrl != ethqsets) {
-		dev_warn(adapter->pdev_dev, "unequal number of [available]"
-			 " ingress/egress queues (%d/%d); using minimum for"
-			 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
-		ethqsets = min(vfres->nethctrl, ethqsets);
-	}
-	if (vfres->neq < ethqsets * 2) {
-		dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
-			 " to support Queue Sets (%d); reducing allowed Queue"
-			 " Sets\n", vfres->neq, ethqsets);
-		ethqsets = vfres->neq / 2;
-	}
-	if (ethqsets > MAX_ETH_QSETS) {
-		dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
-			 " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
-		ethqsets = MAX_ETH_QSETS;
-	}
-	if (vfres->niq != 0 || vfres->neq > ethqsets * 2) {
-		dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
-			 " ignored\n", vfres->niq, vfres->neq - ethqsets * 2);
-	}
-	adapter->sge.max_ethqsets = ethqsets;
-
-	/*
-	 * Check for various parameter sanity issues. Most checks simply
-	 * result in us using fewer resources than our provissioning but we
-	 * do need at least one "port" with which to work ...
-	 */
-	if (adapter->sge.max_ethqsets < adapter->params.nports) {
-		dev_warn(adapter->pdev_dev, "only using %d of %d available"
-			 " virtual interfaces (too few Queue Sets)\n",
-			 adapter->sge.max_ethqsets, adapter->params.nports);
-		adapter->params.nports = adapter->sge.max_ethqsets;
+	/* Check for various parameter sanity issues */
+	if (adapter->params.vfres.pmask == 0) {
+		dev_err(adapter->pdev_dev, "no port access configured\n"
+			"usable!\n");
+		return -EINVAL;
 	}
-	if (adapter->params.nports == 0) {
+	if (adapter->params.vfres.nvi == 0) {
 		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
 			"usable!\n");
 		return -EINVAL;
 	}
+
+	/* Initialize nports and max_ethqsets now that we have our Virtual
+	 * Function Resources.
+	 */
+	size_nports_qsets(adapter);
+
 	return 0;
 }
 
@@ -2771,17 +2786,55 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 		}
 	}
 
+	/* See what interrupts we'll be using. If we've been configured to
+	 * use MSI-X interrupts, try to enable them but fall back to using
+	 * MSI interrupts if we can't enable MSI-X interrupts. If we can't
+	 * get MSI interrupts we bail with the error.
+	 */
+	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
+		adapter->flags |= USING_MSIX;
+	else {
+		if (msi == MSI_MSIX) {
+			dev_info(adapter->pdev_dev,
+				 "Unable to use MSI-X Interrupts; falling "
+				 "back to MSI Interrupts\n");
+
+			/* We're going to need a Forwarded Interrupt Queue so
+			 * that may cut into how many Queue Sets we can
+			 * support.
+			 */
+			msi = MSI_MSI;
+			size_nports_qsets(adapter);
+		}
+		err = pci_enable_msi(pdev);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
+				" err=%d\n", err);
+			goto err_free_dev;
+		}
+		adapter->flags |= USING_MSI;
+	}
+
+	/* Now that we know how many "ports" we have and what interrupt
+	 * mechanism we're going to use, we can configure our queue resources.
+	 */
+	cfg_queues(adapter);
+
 	/*
 	 * The "card" is now ready to go. If any errors occur during device
 	 * registration we do not fail the whole "card" but rather proceed
 	 * only with the ports we manage to register successfully. However we
 	 * must register at least one net device.
 	 */
 	for_each_port(adapter, pidx) {
+		struct port_info *pi = netdev_priv(adapter->port[pidx]);
 		netdev = adapter->port[pidx];
 		if (netdev == NULL)
 			continue;
 
+		netif_set_real_num_tx_queues(netdev, pi->nqsets);
+		netif_set_real_num_rx_queues(netdev, pi->nqsets);
+
 		err = register_netdev(netdev);
 		if (err) {
 			dev_warn(&pdev->dev, "cannot register net device %s,"
@@ -2793,7 +2846,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 	}
 	if (adapter->registered_device_map == 0) {
 		dev_err(&pdev->dev, "could not register any net devices\n");
-		goto err_free_dev;
+		goto err_disable_interrupts;
 	}
 
 	/*
@@ -2810,32 +2863,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 		setup_debugfs(adapter);
 	}
 
-	/*
-	 * See what interrupts we'll be using. If we've been configured to
-	 * use MSI-X interrupts, try to enable them but fall back to using
-	 * MSI interrupts if we can't enable MSI-X interrupts. If we can't
-	 * get MSI interrupts we bail with the error.
-	 */
-	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
-		adapter->flags |= USING_MSIX;
-	else {
-		err = pci_enable_msi(pdev);
-		if (err) {
-			dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
-				" err=%d\n",
-				msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
-			goto err_free_debugfs;
-		}
-		adapter->flags |= USING_MSI;
-	}
-
-	/*
-	 * Now that we know how many "ports" we have and what their types are,
-	 * and how many Queue Sets we can support, we can configure our queue
-	 * resources.
-	 */
-	cfg_queues(adapter);
-
 	/*
 	 * Print a short notice on the existence and configuration of the new
 	 * VF network device ...
@@ -2856,11 +2883,13 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 	 * Error recovery and exit code. Unwind state that's been created
 	 * so far and return the error.
 	 */
-
-err_free_debugfs:
-	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
-		cleanup_debugfs(adapter);
-		debugfs_remove_recursive(adapter->debugfs_root);
+err_disable_interrupts:
+	if (adapter->flags & USING_MSIX) {
+		pci_disable_msix(adapter->pdev);
+		adapter->flags &= ~USING_MSIX;
+	} else if (adapter->flags & USING_MSI) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~USING_MSI;
 	}
 
 err_free_dev:
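
For readers who want to sanity-check the new Queue Set arithmetic, the standalone C sketch below mirrors the calculation performed by size_nports_qsets(). It is illustrative only: the struct, the popcount helper, the MAX_* limits and the resource numbers are hypothetical stand-ins, not the driver's own definitions.

/* Illustrative sketch only: mirrors the Queue Set sizing logic of
 * size_nports_qsets() with hypothetical resource values and mock types.
 */
#include <stdio.h>

#define MAX_NPORTS	4		/* assumed port limit */
#define MAX_ETH_QSETS	32		/* assumed Queue Set limit */

struct vf_resources_mock {		/* stand-in for the VF resources */
	unsigned int nvi;		/* provisioned Virtual Interfaces */
	unsigned int pmask;		/* Port Access Rights Mask */
	unsigned int niqflint;		/* FL/interrupt-capable ingress queues */
	unsigned int nethctrl;		/* Ethernet/control egress queues */
	unsigned int neq;		/* egress contexts */
};

static unsigned int popcount32(unsigned int w)	/* plays the role of hweight32() */
{
	unsigned int n = 0;

	for (; w; w >>= 1)
		n += w & 1;
	return n;
}

int main(void)
{
	struct vf_resources_mock vfres = {
		.nvi = 2, .pmask = 0x3, .niqflint = 10, .nethctrl = 8, .neq = 16,
	};
	int using_msi = 1;		/* 1 after falling back from MSI-X to MSI */
	unsigned int nports, pmask_nports, ethqsets;

	/* nports: capped by provisioned VIs, MAX_NPORTS, and the access mask */
	nports = vfres.nvi;
	if (nports > MAX_NPORTS)
		nports = MAX_NPORTS;
	pmask_nports = popcount32(vfres.pmask);
	if (pmask_nports < nports)
		nports = pmask_nports;

	/* One ingress queue goes to the firmware event queue; under MSI a
	 * second one goes to the forwarded-interrupt queue.
	 */
	ethqsets = vfres.niqflint - 1 - (using_msi ? 1 : 0);
	if (vfres.nethctrl < ethqsets)
		ethqsets = vfres.nethctrl;
	if (vfres.neq < ethqsets * 2)	/* each Queue Set needs two egress contexts */
		ethqsets = vfres.neq / 2;
	if (ethqsets > MAX_ETH_QSETS)
		ethqsets = MAX_ETH_QSETS;

	/* Finally, we need at least one Queue Set per "port". */
	if (ethqsets < nports)
		nports = ethqsets;

	printf("nports=%u, max_ethqsets=%u\n", nports, ethqsets);
	return 0;
}

With these example numbers the sketch prints nports=2, max_ethqsets=8: eight Queue Sets shared between two ports, with two ingress queues held back for the firmware event queue and the MSI forwarded-interrupt queue.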
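The ordering change in cxgb4vf_pci_probe() can be summarized the same way. The sketch below is a rough, self-contained mock of the new flow, not driver code: enable_msix_ok(), enable_msi_ok(), size_nports_qsets_mock() and cfg_queues_mock() are invented names used only to show that the Queue Set sizing is redone on the MSI fallback path before any queue resources are configured.

/* Illustrative sketch only: the probe-time ordering the patch establishes.
 * All helpers below are mocks, not the driver's functions.
 */
#include <stdbool.h>
#include <stdio.h>

enum msi_mode { MSI_MSIX = 1, MSI_MSI = 2 };	/* assumed values */

static bool enable_msix_ok(void)	{ return false; /* pretend MSI-X failed */ }
static bool enable_msi_ok(void)		{ return true; }
static void size_nports_qsets_mock(enum msi_mode msi)
{
	printf("re-sizing Queue Sets for %s\n", msi == MSI_MSI ? "MSI" : "MSI-X");
}
static void cfg_queues_mock(void)	{ printf("configuring queue resources\n"); }

int main(void)
{
	enum msi_mode msi = MSI_MSIX;
	bool using_msix = false, using_msi = false;

	/* Queue Sets were first sized assuming MSI-X; a fall-back to MSI
	 * reserves an extra ingress queue for forwarded interrupts, so the
	 * sizing is redone before any queue resources are committed.
	 */
	if (msi == MSI_MSIX && enable_msix_ok()) {
		using_msix = true;
	} else {
		if (msi == MSI_MSIX) {
			msi = MSI_MSI;
			size_nports_qsets_mock(msi);
		}
		if (!enable_msi_ok())
			return 1;	/* no usable interrupts: bail out */
		using_msi = true;
	}

	/* Only now, with the interrupt mode known, configure the queues;
	 * netdev registration follows.
	 */
	cfg_queues_mock();
	printf("using %s\n", using_msix ? "MSI-X" : using_msi ? "MSI" : "none");
	return 0;
}

The key point is that cfg_queues() now runs only after the interrupt mode is final, which is also why the netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues() calls move from cxgb4vf_open() into the registration loop of the probe routine.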