@@ -5344,15 +5344,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 	struct net_device *vdev = accel->netdev;
 	int i, baseq, err;
 
-	if (!test_bit(accel->pool, adapter->fwd_bitmask))
-		return 0;
-
 	baseq = accel->pool * adapter->num_rx_queues_per_pool;
 	netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
 		   accel->pool, adapter->num_rx_pools,
 		   baseq, baseq + adapter->num_rx_queues_per_pool);
 
-	accel->netdev = vdev;
 	accel->rx_base_queue = baseq;
 	accel->tx_base_queue = baseq;
 
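These two deletions pair with the reworked ixgbe_fwd_add() later in this patch: the pool bit is now always set, and accel->netdev assigned, before ixgbe_fwd_ring_up() is ever called, so the test_bit() guard and the accel->netdev assignment become dead code here.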
@@ -5372,9 +5368,17 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 	if (err >= 0)
 		return 0;
 
+	/* if we cannot add the MAC rule then disable the offload */
+	macvlan_release_l2fw_offload(vdev);
+
 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
 		adapter->rx_ring[baseq + i]->netdev = NULL;
 
+	netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
+
+	clear_bit(accel->pool, adapter->fwd_bitmask);
+	kfree(accel);
+
 	return err;
 }
 
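Note the ownership rule this hunk establishes: on failure, ixgbe_fwd_ring_up() now releases the offload, clears the pool bit, and frees the accel struct itself, so callers (see the reworked ixgbe_fwd_add() below) simply propagate ERR_PTR(err) without touching accel again.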
@@ -8799,6 +8803,49 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
 }
 
 #endif /* CONFIG_IXGBE_DCB */
+static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
+{
+	struct ixgbe_adapter *adapter = data;
+	struct ixgbe_fwd_adapter *accel;
+	int pool;
+
+	/* we only care about macvlans... */
+	if (!netif_is_macvlan(vdev))
+		return 0;
+
+	/* that have hardware offload enabled... */
+	accel = macvlan_accel_priv(vdev);
+	if (!accel)
+		return 0;
+
+	/* If we can relocate to a different bit do so */
+	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+	if (pool < adapter->num_rx_pools) {
+		set_bit(pool, adapter->fwd_bitmask);
+		accel->pool = pool;
+		return 0;
+	}
+
+	/* if we cannot find a free pool then disable the offload */
+	netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
+	macvlan_release_l2fw_offload(vdev);
+	kfree(accel);
+
+	return 0;
+}
+
+static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	/* flush any stale bits out of the fwd bitmask */
+	bitmap_clear(adapter->fwd_bitmask, 1, 63);
+
+	/* walk through upper devices reassigning pools */
+	netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
+				      adapter);
+}
+
 /**
  * ixgbe_setup_tc - configure net_device for multiple traffic classes
  *
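The walk above relocates each offloaded macvlan to the lowest free pool after the bitmask has been flushed down to just the PF's bit, so the surviving offloads end up packed densely from pool 1 upward. A minimal userspace sketch of that compaction idea, with a plain uint64_t standing in for adapter->fwd_bitmask, a hypothetical find_first_zero() in place of the kernel's find_first_zero_bit(), and made-up pool numbers:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's find_first_zero_bit() */
static int find_first_zero(uint64_t mask, int limit)
{
	int i;

	for (i = 0; i < limit; i++)
		if (!(mask & (1ULL << i)))
			return i;
	return limit;
}

int main(void)
{
	uint64_t fwd_bitmask = 1;		/* only bit 0, the PF's pool, survives the flush */
	int num_rx_pools = 3;			/* pretend the new layout has 3 pools */
	int stale_pools[] = { 3, 6, 7 };	/* pools three macvlans held before */
	unsigned int i;

	for (i = 0; i < sizeof(stale_pools) / sizeof(stale_pools[0]); i++) {
		int pool = find_first_zero(fwd_bitmask, num_rx_pools);

		if (pool < num_rx_pools) {
			fwd_bitmask |= 1ULL << pool;	/* set_bit() */
			printf("macvlan moves from pool %d to pool %d\n",
			       stale_pools[i], pool);
		} else {
			printf("macvlan in pool %d loses its offload\n",
			       stale_pools[i]);
		}
	}
	return 0;
}

With only three pools in the new layout, the first two macvlans are repacked into pools 1 and 2 and the third takes the same drop-the-offload path the driver uses when no free pool remains.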
@@ -8866,6 +8913,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 #endif /* CONFIG_IXGBE_DCB */
 	ixgbe_init_interrupt_scheme(adapter);
 
+	ixgbe_defrag_macvlan_pools(dev);
+
 	if (netif_running(dev))
 		return ixgbe_open(dev);
 
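Hooking the defrag in here means any path that rebuilds the queue layout through ixgbe_setup_tc(), not just an explicit tc change, recompacts the macvlan pools against the freshly allocated rings.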
@@ -9415,6 +9464,22 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
 	return features;
 }
 
+static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
+{
+	int rss = min_t(int, ixgbe_max_rss_indices(adapter),
+			num_online_cpus());
+
+	/* go back to full RSS if we're not running SR-IOV */
+	if (!adapter->ring_feature[RING_F_VMDQ].offset)
+		adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
+				    IXGBE_FLAG_SRIOV_ENABLED);
+
+	adapter->ring_feature[RING_F_RSS].limit = rss;
+	adapter->ring_feature[RING_F_VMDQ].limit = 1;
+
+	ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
+}
+
 static int ixgbe_set_features(struct net_device *netdev,
 			      netdev_features_t features)
 {
@@ -9495,7 +9560,9 @@ static int ixgbe_set_features(struct net_device *netdev,
 		}
 	}
 
-	if (need_reset)
+	if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
+		ixgbe_reset_l2fw_offload(adapter);
+	else if (need_reset)
 		ixgbe_do_reset(netdev);
 	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
 			    NETIF_F_HW_VLAN_CTAG_FILTER))
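With this change, toggling the NETIF_F_HW_L2FW_DOFFLOAD feature takes the dedicated teardown path above instead of a generic device reset whenever extra Rx pools exist. For manual testing, this feature bit should be the one ethtool exposes as l2-fwd-offload, e.g. ethtool -K eth0 l2-fwd-offload off.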
@@ -9758,11 +9825,9 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 
 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 {
-	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
-	int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+	struct ixgbe_fwd_adapter *accel;
 	int tcs = adapter->hw_tcs ? : 1;
-	unsigned int limit;
 	int pool, err;
 
 	/* The hardware supported by ixgbe only filters on the destination MAC
@@ -9772,55 +9837,81 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	if (!macvlan_supports_dest_filter(vdev))
 		return ERR_PTR(-EMEDIUMTYPE);
 
-	/* Hardware has a limited number of available pools. Each VF, and the
-	 * PF require a pool. Check to ensure we don't attempt to use more
-	 * then the available number of pools.
-	 */
-	if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
-		return ERR_PTR(-EINVAL);
+	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+	if (pool == adapter->num_rx_pools) {
+		u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
+		u16 reserved_pools;
+
+		if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+		     adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
+		    adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
+			return ERR_PTR(-EBUSY);
+
+		/* Hardware has a limited number of available pools. Each VF,
+		 * and the PF require a pool. Check to ensure we don't
+		 * attempt to use more then the available number of pools.
+		 */
+		if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
+			return ERR_PTR(-EBUSY);
 
-	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-	      adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
-	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
-		return ERR_PTR(-EBUSY);
+		/* Enable VMDq flag so device will be set in VM mode */
+		adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
+				  IXGBE_FLAG_SRIOV_ENABLED;
 
-	fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
-	if (!fwd_adapter)
-		return ERR_PTR(-ENOMEM);
+		/* Try to reserve as many queues per pool as possible,
+		 * we start with the configurations that support 4 queues
+		 * per pools, followed by 2, and then by just 1 per pool.
+		 */
+		if (used_pools < 32 && adapter->num_rx_pools < 16)
+			reserved_pools = min_t(u16,
+					       32 - used_pools,
+					       16 - adapter->num_rx_pools);
+		else if (adapter->num_rx_pools < 32)
+			reserved_pools = min_t(u16,
+					       64 - used_pools,
+					       32 - adapter->num_rx_pools);
+		else
+			reserved_pools = 64 - used_pools;
 
-	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
-	set_bit(pool, adapter->fwd_bitmask);
-	limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
 
-	/* Enable VMDq flag so device will be set in VM mode */
-	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
-	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+		if (!reserved_pools)
+			return ERR_PTR(-EBUSY);
 
-	fwd_adapter->pool = pool;
+		adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
 
-	/* Force reinit of ring allocation with VMDQ enabled */
-	err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+		/* Force reinit of ring allocation with VMDQ enabled */
+		err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
+		if (err)
+			return ERR_PTR(err);
 
-	if (!err && netif_running(pdev))
-		err = ixgbe_fwd_ring_up(adapter, fwd_adapter);
+		if (pool >= adapter->num_rx_pools)
+			return ERR_PTR(-ENOMEM);
+	}
 
-	if (!err)
-		return fwd_adapter;
+	accel = kzalloc(sizeof(*accel), GFP_KERNEL);
+	if (!accel)
+		return ERR_PTR(-ENOMEM);
+
+	set_bit(pool, adapter->fwd_bitmask);
+	accel->pool = pool;
+	accel->netdev = vdev;
 
-	/* unwind counter and free adapter struct */
-	netdev_info(pdev,
-		    "%s: dfwd hardware acceleration failed\n", vdev->name);
-	clear_bit(pool, adapter->fwd_bitmask);
-	kfree(fwd_adapter);
-	return ERR_PTR(err);
+	if (!netif_running(pdev))
+		return accel;
+
+	err = ixgbe_fwd_ring_up(adapter, accel);
+	if (err)
+		return ERR_PTR(err);
+
+	return accel;
 }
 
 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 {
	struct ixgbe_fwd_adapter *accel = priv;
 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
 	unsigned int rxbase = accel->rx_base_queue;
-	unsigned int limit, i;
+	unsigned int i;
 
 	/* delete unicast filter associated with offloaded interface */
 	ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
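The reservation ladder above grows the VMDq pool count in steps rather than one pool at a time. A standalone sketch of just that arithmetic, where reserve_pools() is a hypothetical rename of the inline logic and the 32/16 and 64/32 breakpoints, plus the 4/2/1 queues-per-pool progression, come straight from the patch's own comment:

#include <stdio.h>

static unsigned short reserve_pools(unsigned short used_pools,
				    unsigned short num_rx_pools)
{
	unsigned short a, b;

	if (used_pools < 32 && num_rx_pools < 16) {
		a = 32 - used_pools;		/* headroom toward 32 total pools */
		b = 16 - num_rx_pools;		/* stay in the 4-queues-per-pool layout */
	} else if (num_rx_pools < 32) {
		a = 64 - used_pools;
		b = 32 - num_rx_pools;		/* 2-queues-per-pool layout */
	} else {
		return 64 - used_pools;		/* last resort: 1 queue per pool */
	}
	return a < b ? a : b;			/* min_t(u16, a, b) */
}

int main(void)
{
	/* PF only: pool 0 in use, no VFs -> grow straight to the 16-pool layout */
	printf("%u\n", reserve_pools(1, 1));	/* prints 15 */
	/* 16 pools already reserved -> step up toward the 32-pool layout */
	printf("%u\n", reserve_pools(16, 16));	/* prints 16 */
	/* 32 Rx pools in use -> only the 1-queue-per-pool headroom remains */
	printf("%u\n", reserve_pools(40, 32));	/* prints 24 */
	return 0;
}

So the first offload request on an idle PF (used_pools = 1, num_rx_pools = 1) reserves 15 extra pools in one shot, and later requests reuse the already-reserved pools via the fwd_bitmask instead of reinitializing the rings each time.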
@@ -9844,25 +9935,6 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	}
 
 	clear_bit(accel->pool, adapter->fwd_bitmask);
-	limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
-	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
-
-	/* go back to full RSS if we're done with our VMQs */
-	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
-		int rss = min_t(int, ixgbe_max_rss_indices(adapter),
-				num_online_cpus());
-
-		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
-		adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
-		adapter->ring_feature[RING_F_RSS].limit = rss;
-	}
-
-	ixgbe_setup_tc(pdev, adapter->hw_tcs);
-	netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
-		   accel->pool, adapter->num_rx_pools,
-		   accel->rx_base_queue,
-		   accel->rx_base_queue +
-		   adapter->num_rx_queues_per_pool);
 	kfree(accel);
 }
 
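Teardown is correspondingly simpler: ixgbe_fwd_del() now only removes the MAC filter, detaches the rings, and clears the pool bit. Shrinking the VMDq limit and falling back to plain RSS is deferred to ixgbe_reset_l2fw_offload() and ixgbe_defrag_macvlan_pools(), so removing a single macvlan no longer forces a full ring reallocation.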