@@ -734,6 +734,8 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
 	    chip_ver > CHELSIO_T5) {
 		hdrlen = sizeof(struct cpl_tx_tnl_lso);
 		hdrlen += sizeof(struct cpl_tx_pkt_core);
+	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+		return 0;
 	} else {
 		hdrlen = skb_shinfo(skb)->gso_size ?
 			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
@@ -775,12 +777,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
 	 */
 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
 	if (skb_shinfo(skb)->gso_size) {
-		if (skb->encapsulation && chip_ver > CHELSIO_T5)
+		if (skb->encapsulation && chip_ver > CHELSIO_T5) {
 			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
 				 sizeof(struct cpl_tx_tnl_lso);
-		else
+		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+			u32 pkt_hdrlen;
+
+			pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
+						     skb_headlen(skb));
+			hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
+				 round_up(pkt_hdrlen, 16);
+		} else {
 			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
 				 sizeof(struct cpl_tx_pkt_lso_core);
+		}
 
 		hdrlen += sizeof(struct cpl_tx_pkt_core);
 		flits += (hdrlen / sizeof(__be64));
@@ -1345,6 +1355,25 @@ static inline int cxgb4_validate_skb(struct sk_buff *skb,
 	return 0;
 }
 
+static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+			     u32 hdr_len)
+{
+	wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
+	wr->u.udpseg.ethlen = skb_network_offset(skb);
+	wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+	wr->u.udpseg.udplen = sizeof(struct udphdr);
+	wr->u.udpseg.rtplen = 0;
+	wr->u.udpseg.r4 = 0;
+	if (skb_shinfo(skb)->gso_size)
+		wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+	else
+		wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
+	wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
+	wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+	return (void *)(wr + 1);
+}
+
 /**
  *	cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
  *	@skb: the packet
@@ -1357,14 +1386,15 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
 	bool ptp_enabled = is_ptp_enabled(skb, dev);
 	unsigned int last_desc, flits, ndesc;
+	u32 wr_mid, ctrl0, op, sgl_off = 0;
 	const struct skb_shared_info *ssi;
+	int len, qidx, credits, ret, left;
 	struct tx_sw_desc *sgl_sdesc;
+	struct fw_eth_tx_eo_wr *eowr;
 	struct fw_eth_tx_pkt_wr *wr;
 	struct cpl_tx_pkt_core *cpl;
-	int len, qidx, credits, ret;
 	const struct port_info *pi;
 	bool immediate = false;
-	u32 wr_mid, ctrl0, op;
 	u64 cntrl, *end, *sgl;
 	struct sge_eth_txq *q;
 	unsigned int chip_ver;
@@ -1469,13 +1499,17 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	wr = (void *)&q->q.desc[q->q.pidx];
+	eowr = (void *)&q->q.desc[q->q.pidx];
 	wr->equiq_to_len16 = htonl(wr_mid);
 	wr->r3 = cpu_to_be64(0);
-	end = (u64 *)wr + flits;
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		end = (u64 *)eowr + flits;
+	else
+		end = (u64 *)wr + flits;
 
 	len = immediate ? skb->len : 0;
 	len += sizeof(*cpl);
-	if (ssi->gso_size) {
+	if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
 		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
 
@@ -1507,20 +1541,29 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 			cntrl = hwcsum(adap->params.chip, skb);
 		}
 		sgl = (u64 *)(cpl + 1); /* sgl start here */
-		if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
-			/* If current position is already at the end of the
-			 * txq, reset the current to point to start of the queue
-			 * and update the end ptr as well.
-			 */
-			if (sgl == (u64 *)q->q.stat) {
-				int left = (u8 *)end - (u8 *)q->q.stat;
-
-				end = (void *)q->q.desc + left;
-				sgl = (void *)q->q.desc;
-			}
-		}
 		q->tso++;
 		q->tx_cso += ssi->gso_segs;
+	} else if (ssi->gso_size) {
+		u64 *start;
+		u32 hdrlen;
+
+		hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
+		len += hdrlen;
+		wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
+					     FW_ETH_TX_EO_WR_IMMDLEN_V(len));
+		cpl = write_eo_udp_wr(skb, eowr, hdrlen);
+		cntrl = hwcsum(adap->params.chip, skb);
+
+		start = (u64 *)(cpl + 1);
+		sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
+						  hdrlen);
+		if (unlikely(start > sgl)) {
+			left = (u8 *)end - (u8 *)q->q.stat;
+			end = (void *)q->q.desc + left;
+		}
+		sgl_off = hdrlen;
+		q->uso++;
+		q->tx_cso += ssi->gso_segs;
 	} else {
 		if (ptp_enabled)
 			op = FW_PTP_TX_PKT_WR;
@@ -1537,6 +1580,16 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
+	if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
+		/* If current position is already at the end of the
+		 * txq, reset the current to point to start of the queue
+		 * and update the end ptr as well.
+		 */
+		left = (u8 *)end - (u8 *)q->q.stat;
+		end = (void *)q->q.desc + left;
+		sgl = (void *)q->q.desc;
+	}
+
 	if (skb_vlan_tag_present(skb)) {
 		q->vlan_ins++;
 		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
@@ -1566,7 +1619,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		cxgb4_inline_tx_skb(skb, &q->q, sgl);
 		dev_consume_skb_any(skb);
 	} else {
-		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0,
+		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
 				sgl_sdesc->addr);
 		skb_orphan(skb);
 		sgl_sdesc->skb = skb;
@@ -2024,18 +2077,23 @@ static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
 	u32 wrlen;
 
 	wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
-	if (skb_shinfo(skb)->gso_size)
+	if (skb_shinfo(skb)->gso_size &&
+	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
 		wrlen += sizeof(struct cpl_tx_pkt_lso_core);
 
 	wrlen += roundup(hdr_len, 16);
 
 	/* Packet headers + WR + CPLs */
 	flits = DIV_ROUND_UP(wrlen, 8);
 
-	if (skb_shinfo(skb)->nr_frags > 0)
-		nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
-	else if (skb->len - hdr_len)
+	if (skb_shinfo(skb)->nr_frags > 0) {
+		if (skb_headlen(skb) - hdr_len)
+			nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+		else
+			nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
+	} else if (skb->len - hdr_len) {
 		nsgl = sgl_len(1);
+	}
 
 	return flits + nsgl;
 }
@@ -2049,16 +2107,16 @@ static inline void *write_eo_wr(struct adapter *adap,
 	struct cpl_tx_pkt_core *cpl;
 	u32 immd_len, wrlen16;
 	bool compl = false;
+	u8 ver, proto;
+
+	ver = ip_hdr(skb)->version;
+	proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;
 
 	wrlen16 = DIV_ROUND_UP(wrlen, 16);
 	immd_len = sizeof(struct cpl_tx_pkt_core);
-	if (skb_shinfo(skb)->gso_size) {
-		if (skb->encapsulation &&
-		    CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
-			immd_len += sizeof(struct cpl_tx_tnl_lso);
-		else
-			immd_len += sizeof(struct cpl_tx_pkt_lso_core);
-	}
+	if (skb_shinfo(skb)->gso_size &&
+	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+		immd_len += sizeof(struct cpl_tx_pkt_lso_core);
 	immd_len += hdr_len;
 
 	if (!eosw_txq->ncompl ||
@@ -2074,23 +2132,27 @@ static inline void *write_eo_wr(struct adapter *adap,
 	wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
 					 FW_WR_FLOWID_V(eosw_txq->hwtid));
 	wr->r3 = 0;
-	wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
-	wr->u.tcpseg.ethlen = skb_network_offset(skb);
-	wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
-	wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
-	wr->u.tcpseg.tsclk_tsoff = 0;
-	wr->u.tcpseg.r4 = 0;
-	wr->u.tcpseg.r5 = 0;
-	wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
-
-	if (ssi->gso_size) {
-		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
-
-		wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
-		cpl = write_tso_wr(adap, skb, lso);
+	if (proto == IPPROTO_UDP) {
+		cpl = write_eo_udp_wr(skb, wr, hdr_len);
 	} else {
-		wr->u.tcpseg.mss = cpu_to_be16(0xffff);
-		cpl = (void *)(wr + 1);
+		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
+		wr->u.tcpseg.ethlen = skb_network_offset(skb);
+		wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+		wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
+		wr->u.tcpseg.tsclk_tsoff = 0;
+		wr->u.tcpseg.r4 = 0;
+		wr->u.tcpseg.r5 = 0;
+		wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+		if (ssi->gso_size) {
+			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+
+			wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
+			cpl = write_tso_wr(adap, skb, lso);
+		} else {
+			wr->u.tcpseg.mss = cpu_to_be16(0xffff);
+			cpl = (void *)(wr + 1);
+		}
 	}
 
 	eosw_txq->cred -= wrlen16;
@@ -4312,7 +4374,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 	txq->q.q_type = CXGB4_TXQ_ETH;
 	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
 	txq->txq = netdevq;
-	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+	txq->tso = 0;
+	txq->uso = 0;
+	txq->tx_cso = 0;
+	txq->vlan_ins = 0;
 	txq->mapping_err = 0;
 	txq->dbqt = dbqt;
 
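For context (not part of the diff above): skbs carrying SKB_GSO_UDP_L4 are produced when a sender requests UDP GSO, most commonly through the UDP_SEGMENT socket option, and the new paths added here hand that segmentation work to the NIC. Below is a minimal, illustrative userspace sketch; the function name and the gso_size value are arbitrary, and error handling is omitted.

/*
 * Illustrative only. Sends one large buffer and asks the stack to split it
 * into gso_size-sized datagrams; with a capable NIC the split is offloaded.
 * On older libcs UDP_SEGMENT may need <linux/udp.h> instead of <netinet/udp.h>.
 */
#include <netinet/in.h>
#include <netinet/udp.h>	/* UDP_SEGMENT */
#include <sys/socket.h>
#include <unistd.h>

static int send_udp_gso(const struct sockaddr_in *dst, const void *buf,
			size_t len)
{
	int gso_size = 1400;	/* payload bytes per segment; must fit the MTU */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* Request UDP GSO for this socket's sends */
	setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
	sendto(fd, buf, len, 0, (const struct sockaddr *)dst, sizeof(*dst));
	close(fd);
	return 0;
}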