@@ -353,23 +353,32 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
 	ixgbe_service_event_schedule(adapter);
 }
 
-static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 {
+	u8 __iomem *reg_addr;
 	u32 value;
+	int i;
+
+	reg_addr = READ_ONCE(hw->hw_addr);
+	if (ixgbe_removed(reg_addr))
+		return IXGBE_FAILED_READ_REG;
 
-	/* The following check not only optimizes a bit by not
-	 * performing a read on the status register when the
-	 * register just read was a status register read that
-	 * returned IXGBE_FAILED_READ_REG. It also blocks any
-	 * potential recursion.
+	/* Register read of 0xFFFFFFF can indicate the adapter has been removed,
+	 * so perform several status register reads to determine if the adapter
+	 * has been removed.
 	 */
-	if (reg == IXGBE_STATUS) {
-		ixgbe_remove_adapter(hw);
-		return;
+	for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
+		value = readl(reg_addr + IXGBE_STATUS);
+		if (value != IXGBE_FAILED_READ_REG)
+			break;
+		mdelay(3);
 	}
-	value = ixgbe_read_reg(hw, IXGBE_STATUS);
+
 	if (value == IXGBE_FAILED_READ_REG)
 		ixgbe_remove_adapter(hw);
+	else
+		value = readl(reg_addr + reg);
+	return value;
 }
 
 /**
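
The rewritten ixgbe_check_remove() above retries the STATUS register a few times before concluding the adapter has been surprise-removed, and returns either a fresh read of the requested register or IXGBE_FAILED_READ_REG for the caller to act on. A rough standalone sketch of that retry pattern follows; the register offset, constant names and retry count are illustrative, not the driver's.

#include <linux/io.h>
#include <linux/delay.h>

#define FAILED_READ	0xFFFFFFFFU	/* all-ones: PCIe read from a removed device */
#define MY_STATUS_REG	0x00008		/* hypothetical status register offset */

/* Re-read a register that returned all-ones; poll the status register a few
 * times so a transient all-ones value is not mistaken for device removal.
 */
static u32 reread_or_detect_removal(u8 __iomem *bar, u32 reg, int retries)
{
	u32 status;
	int i;

	for (i = 0; i < retries; i++) {
		status = readl(bar + MY_STATUS_REG);
		if (status != FAILED_READ)
			return readl(bar + reg);	/* device still present: redo the original read */
		mdelay(3);
	}
	return FAILED_READ;	/* caller should treat the device as removed */
}
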
@@ -415,7 +424,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
 writes_completed:
 	value = readl(reg_addr + reg);
 	if (unlikely(value == IXGBE_FAILED_READ_REG))
-		ixgbe_check_remove(hw, reg);
+		value = ixgbe_check_remove(hw, reg);
 	return value;
 }
 
@@ -1620,7 +1629,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = ixgbe_rx_offset(rx_ring);
-	bi->pagecnt_bias = 1;
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
 	rx_ring->rx_stats.alloc_rx_page++;
 
 	return true;
@@ -2030,8 +2040,8 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
 	 * the pagecnt_bias and page count so that we fully restock the
 	 * number of references the driver holds.
 	 */
-	if (unlikely(!pagecnt_bias)) {
-		page_ref_add(page, USHRT_MAX);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
 		rx_buffer->pagecnt_bias = USHRT_MAX;
 	}
 
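
The two Rx hunks above apply the same bookkeeping trick: the page's refcount is charged up to USHRT_MAX once at allocation, each buffer handed to the stack is paid for from the driver-local pagecnt_bias instead of an atomic operation, and the budget is restocked with a single page_ref_add() when the bias runs down to 1. A toy model of that scheme, with a plain C11 atomic standing in for the struct page refcount and all names hypothetical:

#include <stdatomic.h>
#include <limits.h>

/* Toy model of the pagecnt_bias trick: pre-charge the shared refcount once,
 * then pay for each use from a cheap local "bias" instead of touching the
 * atomic on every packet. Names and the plain atomic are illustrative only.
 */
struct rx_page {
	atomic_uint refcount;		/* stands in for struct page's refcount */
	unsigned short pagecnt_bias;	/* references the driver still owns locally */
};

static void rx_page_init(struct rx_page *p)
{
	atomic_init(&p->refcount, 1);			/* reference from the allocator */
	atomic_fetch_add(&p->refcount, USHRT_MAX - 1);	/* pre-charge once */
	p->pagecnt_bias = USHRT_MAX;
}

static void rx_page_give_to_stack(struct rx_page *p)
{
	p->pagecnt_bias--;	/* hand one pre-charged reference to the consumer */
}

static void rx_page_reuse(struct rx_page *p)
{
	if (p->pagecnt_bias == 1) {			/* nearly out of local budget */
		atomic_fetch_add(&p->refcount, USHRT_MAX - 1);
		p->pagecnt_bias = USHRT_MAX;		/* restock in one atomic op */
	}
}
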
@@ -7721,7 +7731,8 @@ static void ixgbe_service_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 		     struct ixgbe_tx_buffer *first,
-		     u8 *hdr_len)
+		     u8 *hdr_len,
+		     struct ixgbe_ipsec_tx_data *itd)
 {
 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
@@ -7735,6 +7746,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
+	u32 fceof_saidx = 0;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7760,13 +7772,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	if (ip.v4->version == 4) {
 		unsigned char *csum_start = skb_checksum_start(skb);
 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+		int len = csum_start - trans_start;
 
 		/* IP header will have to cancel out any data that
-		 * is not a part of the outer IP header
+		 * is not a part of the outer IP header, so set to
+		 * a reverse csum if needed, else init check to 0.
 		 */
-		ip.v4->check = csum_fold(csum_partial(trans_start,
-						      csum_start - trans_start,
-						      0));
+		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
+				   csum_fold(csum_partial(trans_start,
+							  len, 0)) : 0;
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 
 		ip.v4->tot_len = 0;
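
For the IPv4 TSO path above, ip.v4->check is now seeded before segmentation: zero in the normal case (the header checksum is regenerated per segment), or, for SKB_GSO_PARTIAL, the folded checksum of the bytes between the outer transport header and the checksum start so that data not belonging to the outer IP header cancels out (the "reverse csum" in the new comment). The helpers below only sketch the ones'-complement fold/partial arithmetic; they are stand-ins, not the kernel's csum_partial()/csum_fold():

#include <stdint.h>
#include <stddef.h>

/* Illustrative ones'-complement helpers: sum a byte range as 16-bit words,
 * then fold the carries and invert. Storing the folded sum of a range in a
 * header checksum field makes that range cancel when the checksum is later
 * computed over it.
 */
static uint32_t csum_partial_sketch(const uint8_t *buf, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;	/* pad the odd trailing byte */
	return sum;
}

static uint16_t csum_fold_sketch(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);	/* fold carries back in */
	return (uint16_t)~sum;				/* ones'-complement result */
}
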
@@ -7797,12 +7811,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 
+	fceof_saidx |= itd->sa_idx;
+	type_tucmd |= itd->flags | itd->trailer_len;
+
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
 	vlan_macip_lens = l4.hdr - ip.hdr;
 	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
 			  mss_l4len_idx);
 
 	return 1;
@@ -7864,10 +7881,8 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) {
-		fceof_saidx |= itd->sa_idx;
-		type_tucmd |= itd->flags | itd->trailer_len;
-	}
+	fceof_saidx |= itd->sa_idx;
+	type_tucmd |= itd->flags | itd->trailer_len;
 
 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
 }
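
In both ixgbe_tso() and ixgbe_tx_csum() the IPsec metadata is now OR-ed into the Tx context descriptor unconditionally; this is safe because a frame with no IPsec offload carries an all-zero ixgbe_ipsec_tx_data, so the OR leaves the descriptor words unchanged. A minimal sketch of that convention, with a hypothetical field layout:

#include <stdint.h>

/* Zero means "not in use", so OR-ing the metadata in unconditionally is a
 * no-op for non-IPsec frames. Structure and field names are illustrative.
 */
struct ipsec_tx_meta {
	uint32_t sa_idx;	/* security association index, 0 if unused */
	uint32_t flags;		/* offload command bits, 0 if unused */
	uint32_t trailer_len;	/* ESP trailer length, 0 if unused */
};

static void fill_ctx_words(const struct ipsec_tx_meta *itd,
			   uint32_t *fceof_saidx, uint32_t *type_tucmd)
{
	*fceof_saidx |= itd->sa_idx;			/* zero leaves the word untouched */
	*type_tucmd |= itd->flags | itd->trailer_len;
}
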
@@ -8495,7 +8510,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif
-	tso = ixgbe_tso(tx_ring, first, &hdr_len);
+	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
@@ -9904,15 +9919,15 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 
 	/* We can only support IPV4 TSO in tunnels if we can mangle the
 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 * IPsec offoad sets skb->encapsulation but still can handle
+	 * the TSO, so it's the exception.
 	 */
-	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
-		features &= ~NETIF_F_TSO;
-
-#ifdef CONFIG_XFRM_OFFLOAD
-	/* IPsec offload doesn't get along well with others *yet* */
-	if (skb->sp)
-		features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM);
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
+#ifdef CONFIG_XFRM
+		if (!skb->sp)
 #endif
+			features &= ~NETIF_F_TSO;
+	}
 
 	return features;
 }
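
The features_check hunk above replaces the blanket "strip TSO and HW_CSUM for any secpath" rule: TSO is now dropped only for encapsulated frames that cannot mangle the inner IP ID and that carry no IPsec secpath. The resulting decision, restated as a small illustrative helper with boolean inputs:

#include <stdbool.h>

/* Sketch of the new ixgbe_features_check() TSO decision: IPsec offload sets
 * skb->encapsulation but can still be segmented, so it is the exception to
 * the MANGLEID requirement. Inputs are booleans for illustration only.
 */
static bool keep_tso(bool encapsulated, bool can_mangle_ipid, bool has_secpath)
{
	if (!encapsulated)
		return true;
	if (can_mangle_ipid)
		return true;
	return has_secpath;	/* IPsec offload is the exception */
}
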