@@ -2786,24 +2786,26 @@ EXPORT_SYMBOL(netif_device_attach);
2786
2786
* Returns a Tx hash based on the given packet descriptor and Tx queues' number
2787
2787
* to be used as a distribution range.
2788
2788
*/
2789
- static u16 skb_tx_hash (const struct net_device * dev , struct sk_buff * skb )
2789
+ static u16 skb_tx_hash (const struct net_device * dev ,
2790
+ const struct net_device * sb_dev ,
2791
+ struct sk_buff * skb )
2790
2792
{
2791
2793
u32 hash ;
2792
2794
u16 qoffset = 0 ;
2793
2795
u16 qcount = dev -> real_num_tx_queues ;
2794
2796
2797
+ if (dev -> num_tc ) {
2798
+ u8 tc = netdev_get_prio_tc_map (dev , skb -> priority );
2799
+
2800
+ qoffset = sb_dev -> tc_to_txq [tc ].offset ;
2801
+ qcount = sb_dev -> tc_to_txq [tc ].count ;
2802
+ }
2803
+
2795
2804
if (skb_rx_queue_recorded (skb )) {
2796
2805
hash = skb_get_rx_queue (skb );
2797
2806
while (unlikely (hash >= qcount ))
2798
2807
hash -= qcount ;
2799
- return hash ;
2800
- }
2801
-
2802
- if (dev -> num_tc ) {
2803
- u8 tc = netdev_get_prio_tc_map (dev , skb -> priority );
2804
-
2805
- qoffset = dev -> tc_to_txq [tc ].offset ;
2806
- qcount = dev -> tc_to_txq [tc ].count ;
2808
+ return hash + qoffset ;
2807
2809
}
2808
2810
2809
2811
return (u16 ) reciprocal_scale (skb_get_hash (skb ), qcount ) + qoffset ;
@@ -3573,7 +3575,8 @@ static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3573
3575
}
3574
3576
#endif
3575
3577
3576
- static int get_xps_queue (struct net_device * dev , struct sk_buff * skb )
3578
+ static int get_xps_queue (struct net_device * dev , struct net_device * sb_dev ,
3579
+ struct sk_buff * skb )
3577
3580
{
3578
3581
#ifdef CONFIG_XPS
3579
3582
struct xps_dev_maps * dev_maps ;
@@ -3587,7 +3590,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3587
3590
if (!static_key_false (& xps_rxqs_needed ))
3588
3591
goto get_cpus_map ;
3589
3592
3590
- dev_maps = rcu_dereference (dev -> xps_rxqs_map );
3593
+ dev_maps = rcu_dereference (sb_dev -> xps_rxqs_map );
3591
3594
if (dev_maps ) {
3592
3595
int tci = sk_rx_queue_get (sk );
3593
3596
@@ -3598,7 +3601,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3598
3601
3599
3602
get_cpus_map :
3600
3603
if (queue_index < 0 ) {
3601
- dev_maps = rcu_dereference (dev -> xps_cpus_map );
3604
+ dev_maps = rcu_dereference (sb_dev -> xps_cpus_map );
3602
3605
if (dev_maps ) {
3603
3606
unsigned int tci = skb -> sender_cpu - 1 ;
3604
3607
@@ -3614,17 +3617,20 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3614
3617
#endif
3615
3618
}
3616
3619
3617
- static u16 __netdev_pick_tx (struct net_device * dev , struct sk_buff * skb )
3620
+ static u16 ___netdev_pick_tx (struct net_device * dev , struct sk_buff * skb ,
3621
+ struct net_device * sb_dev )
3618
3622
{
3619
3623
struct sock * sk = skb -> sk ;
3620
3624
int queue_index = sk_tx_queue_get (sk );
3621
3625
3626
+ sb_dev = sb_dev ? : dev ;
3627
+
3622
3628
if (queue_index < 0 || skb -> ooo_okay ||
3623
3629
queue_index >= dev -> real_num_tx_queues ) {
3624
- int new_index = get_xps_queue (dev , skb );
3630
+ int new_index = get_xps_queue (dev , sb_dev , skb );
3625
3631
3626
3632
if (new_index < 0 )
3627
- new_index = skb_tx_hash (dev , skb );
3633
+ new_index = skb_tx_hash (dev , sb_dev , skb );
3628
3634
3629
3635
if (queue_index != new_index && sk &&
3630
3636
sk_fullsock (sk ) &&
@@ -3637,9 +3643,15 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3637
3643
return queue_index ;
3638
3644
}
3639
3645
3646
+ static u16 __netdev_pick_tx (struct net_device * dev ,
3647
+ struct sk_buff * skb )
3648
+ {
3649
+ return ___netdev_pick_tx (dev , skb , NULL );
3650
+ }
3651
+
3640
3652
struct netdev_queue * netdev_pick_tx (struct net_device * dev ,
3641
3653
struct sk_buff * skb ,
3642
- void * accel_priv )
3654
+ struct net_device * sb_dev )
3643
3655
{
3644
3656
int queue_index = 0 ;
3645
3657
@@ -3654,10 +3666,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3654
3666
const struct net_device_ops * ops = dev -> netdev_ops ;
3655
3667
3656
3668
if (ops -> ndo_select_queue )
3657
- queue_index = ops -> ndo_select_queue (dev , skb , accel_priv ,
3669
+ queue_index = ops -> ndo_select_queue (dev , skb , sb_dev ,
3658
3670
__netdev_pick_tx );
3659
3671
else
3660
- queue_index = __netdev_pick_tx (dev , skb );
3672
+ queue_index = ___netdev_pick_tx (dev , skb , sb_dev );
3661
3673
3662
3674
queue_index = netdev_cap_txqueue (dev , queue_index );
3663
3675
}
@@ -3669,7 +3681,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3669
3681
/**
3670
3682
* __dev_queue_xmit - transmit a buffer
3671
3683
* @skb: buffer to transmit
3672
- * @accel_priv: private data used for L2 forwarding offload
3684
+ * @sb_dev: subordinate device used for L2 forwarding offload
3673
3685
*
3674
3686
* Queue a buffer for transmission to a network device. The caller must
3675
3687
* have set the device and priority and built the buffer before calling
@@ -3692,7 +3704,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3692
3704
* the BH enable code must have IRQs enabled so that it will not deadlock.
3693
3705
* --BLG
3694
3706
*/
3695
- static int __dev_queue_xmit (struct sk_buff * skb , void * accel_priv )
3707
+ static int __dev_queue_xmit (struct sk_buff * skb , struct net_device * sb_dev )
3696
3708
{
3697
3709
struct net_device * dev = skb -> dev ;
3698
3710
struct netdev_queue * txq ;
@@ -3731,7 +3743,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3731
3743
else
3732
3744
skb_dst_force (skb );
3733
3745
3734
- txq = netdev_pick_tx (dev , skb , accel_priv );
3746
+ txq = netdev_pick_tx (dev , skb , sb_dev );
3735
3747
q = rcu_dereference_bh (txq -> qdisc );
3736
3748
3737
3749
trace_net_dev_queue (skb );
@@ -3805,9 +3817,9 @@ int dev_queue_xmit(struct sk_buff *skb)
3805
3817
}
3806
3818
EXPORT_SYMBOL (dev_queue_xmit );
3807
3819
3808
- int dev_queue_xmit_accel (struct sk_buff * skb , void * accel_priv )
3820
+ int dev_queue_xmit_accel (struct sk_buff * skb , struct net_device * sb_dev )
3809
3821
{
3810
- return __dev_queue_xmit (skb , accel_priv );
3822
+ return __dev_queue_xmit (skb , sb_dev );
3811
3823
}
3812
3824
EXPORT_SYMBOL (dev_queue_xmit_accel );
3813
3825
0 commit comments