@@ -300,65 +300,6 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
 }
 #endif
 
-static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
-		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
-{
-	const struct ulptx_sge_pair *p;
-	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
-
-	if (likely(skb_headlen(skb)))
-		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
-				 DMA_TO_DEVICE);
-	else {
-		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
-			       DMA_TO_DEVICE);
-		nfrags--;
-	}
-
-	/*
-	 * the complexity below is because of the possibility of a wrap-around
-	 * in the middle of an SGL
-	 */
-	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
-		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
-unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
-				       ntohl(p->len[0]), DMA_TO_DEVICE);
-			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
-				       ntohl(p->len[1]), DMA_TO_DEVICE);
-			p++;
-		} else if ((u8 *)p == (u8 *)q->stat) {
-			p = (const struct ulptx_sge_pair *)q->desc;
-			goto unmap;
-		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
-			const __be64 *addr = (const __be64 *)q->desc;
-
-			dma_unmap_page(dev, be64_to_cpu(addr[0]),
-				       ntohl(p->len[0]), DMA_TO_DEVICE);
-			dma_unmap_page(dev, be64_to_cpu(addr[1]),
-				       ntohl(p->len[1]), DMA_TO_DEVICE);
-			p = (const struct ulptx_sge_pair *)&addr[2];
-		} else {
-			const __be64 *addr = (const __be64 *)q->desc;
-
-			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
-				       ntohl(p->len[0]), DMA_TO_DEVICE);
-			dma_unmap_page(dev, be64_to_cpu(addr[0]),
-				       ntohl(p->len[1]), DMA_TO_DEVICE);
-			p = (const struct ulptx_sge_pair *)&addr[1];
-		}
-	}
-	if (nfrags) {
-		__be64 addr;
-
-		if ((u8 *)p == (u8 *)q->stat)
-			p = (const struct ulptx_sge_pair *)q->desc;
-		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
-						       *(const __be64 *)q->desc;
-		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
-			       DMA_TO_DEVICE);
-	}
-}
-
 /**
  * free_tx_desc - reclaims Tx descriptors and their buffers
  * @adapter: the adapter
@@ -372,15 +313,16 @@ unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
 		  unsigned int n, bool unmap)
 {
-	struct tx_sw_desc *d;
 	unsigned int cidx = q->cidx;
-	struct device *dev = adap->pdev_dev;
+	struct tx_sw_desc *d;
 
 	d = &q->sdesc[cidx];
 	while (n--) {
 		if (d->skb) {                       /* an SGL is present */
-			if (unmap)
-				unmap_sgl(dev, d->skb, d->sgl, q);
+			if (unmap && d->addr[0]) {
+				unmap_skb(adap->pdev_dev, d->skb, d->addr);
+				memset(d->addr, 0, sizeof(d->addr));
+			}
 			dev_consume_skb_any(d->skb);
 			d->skb = NULL;
 		}
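
Note on the new reclaim path: instead of re-walking the hardware ULPTX SGL (with its mid-list wrap-around handling in the deleted unmap_sgl()), free_tx_desc() now unmaps straight from the per-descriptor addr[] array that cxgb4_map_skb() filled at transmit time, and zeroes it so the same descriptor is never unmapped twice. The driver's real unmap_skb() helper is defined elsewhere in sge.c and is not shown in this diff; a minimal sketch of the idea, assuming the usual head-then-fragments layout of the array, looks like this:

static void unmap_skb_sketch(struct device *dev, const struct sk_buff *skb,
			     const dma_addr_t *addr)
{
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int i;

	/* addr[0] holds the DMA mapping of the linear head ... */
	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	/* ... followed by one entry per page fragment */
	for (i = 0; i < si->nr_frags; i++)
		dma_unmap_page(dev, *addr++, skb_frag_size(&si->frags[i]),
			       DMA_TO_DEVICE);
}
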
@@ -1414,13 +1356,13 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
 	bool ptp_enabled = is_ptp_enabled(skb, dev);
-	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+	unsigned int last_desc, flits, ndesc;
 	const struct skb_shared_info *ssi;
+	struct tx_sw_desc *sgl_sdesc;
 	struct fw_eth_tx_pkt_wr *wr;
 	struct cpl_tx_pkt_core *cpl;
 	int len, qidx, credits, ret;
 	const struct port_info *pi;
-	unsigned int flits, ndesc;
 	bool immediate = false;
 	u32 wr_mid, ctrl0, op;
 	u64 cntrl, *end, *sgl;
@@ -1489,8 +1431,14 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (skb->encapsulation && chip_ver > CHELSIO_T5)
 		tnl_type = cxgb_encap_offload_supported(skb);
 
+	last_desc = q->q.pidx + ndesc - 1;
+	if (last_desc >= q->q.size)
+		last_desc -= q->q.size;
+	sgl_sdesc = &q->q.sdesc[last_desc];
+
 	if (!immediate &&
-	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
 		q->mapping_err++;
 		if (ptp_enabled)
 			spin_unlock(&adap->ptp_lock);
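
Both the PF and VF transmit paths now locate the software descriptor that will own the skb before attempting the DMA mapping, using the same ring-index arithmetic. Because one work request never needs more descriptors than the queue holds, a single conditional subtraction is enough to wrap pidx + ndesc - 1 back into range; a toy, standalone illustration of that wrap logic (hypothetical names, not driver code):

#include <assert.h>

/* Wrap pidx + ndesc - 1 back into [0, size) with one conditional
 * subtraction; valid because ndesc never exceeds size here.
 */
static unsigned int last_desc_index(unsigned int pidx, unsigned int ndesc,
				    unsigned int size)
{
	unsigned int last = pidx + ndesc - 1;

	if (last >= size)
		last -= size;
	return last;
}

int main(void)
{
	assert(last_desc_index(5, 2, 8) == 6);	/* stays in range */
	assert(last_desc_index(7, 3, 8) == 1);	/* wraps past the end */
	return 0;
}
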
@@ -1618,16 +1566,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		cxgb4_inline_tx_skb(skb, &q->q, sgl);
 		dev_consume_skb_any(skb);
 	} else {
-		int last_desc;
-
-		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
+		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0,
+				sgl_sdesc->addr);
 		skb_orphan(skb);
-
-		last_desc = q->q.pidx + ndesc - 1;
-		if (last_desc >= q->q.size)
-			last_desc -= q->q.size;
-		q->q.sdesc[last_desc].skb = skb;
-		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+		sgl_sdesc->skb = skb;
 	}
 
 	txq_advance(&q->q, ndesc);
@@ -1725,12 +1667,12 @@ static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
 static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
 				     struct net_device *dev)
 {
-	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+	unsigned int last_desc, flits, ndesc;
 	const struct skb_shared_info *ssi;
 	struct fw_eth_tx_pkt_vm_wr *wr;
+	struct tx_sw_desc *sgl_sdesc;
 	struct cpl_tx_pkt_core *cpl;
 	const struct port_info *pi;
-	unsigned int flits, ndesc;
 	struct sge_eth_txq *txq;
 	struct adapter *adapter;
 	int qidx, credits, ret;
@@ -1782,12 +1724,19 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
+	last_desc = txq->q.pidx + ndesc - 1;
+	if (last_desc >= txq->q.size)
+		last_desc -= txq->q.size;
+	sgl_sdesc = &txq->q.sdesc[last_desc];
+
 	if (!t4vf_is_eth_imm(skb) &&
-	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
+				   sgl_sdesc->addr) < 0)) {
 		/* We need to map the skb into PCI DMA space (because it can't
 		 * be in-lined directly into the Work Request) and the mapping
 		 * operation failed.  Record the error and drop the packet.
 		 */
+		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
 		txq->mapping_err++;
 		goto out_free;
 	}
@@ -1962,7 +1911,6 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
 		 */
 		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
 		struct sge_txq *tq = &txq->q;
-		int last_desc;
 
 		/* If the Work Request header was an exact multiple of our TX
 		 * Descriptor length, then it's possible that the starting SGL
@@ -1976,14 +1924,9 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
 					  ((void *)end - (void *)tq->stat));
 		}
 
-		cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+		cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
 		skb_orphan(skb);
-
-		last_desc = tq->pidx + ndesc - 1;
-		if (last_desc >= tq->size)
-			last_desc -= tq->size;
-		tq->sdesc[last_desc].skb = skb;
-		tq->sdesc[last_desc].sgl = sgl;
+		sgl_sdesc->skb = skb;
 	}
 
 	/* Advance our internal TX Queue state, tell the hardware about
@@ -2035,7 +1978,7 @@ static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
 void cxgb4_eosw_txq_free_desc(struct adapter *adap,
 			      struct sge_eosw_txq *eosw_txq, u32 ndesc)
 {
-	struct sge_eosw_desc *d;
+	struct tx_sw_desc *d;
 
 	d = &eosw_txq->desc[eosw_txq->last_cidx];
 	while (ndesc--) {
@@ -2167,7 +2110,7 @@ static void ethofld_hard_xmit(struct net_device *dev,
 	struct cpl_tx_pkt_core *cpl;
 	struct fw_eth_tx_eo_wr *wr;
 	bool skip_eotx_wr = false;
-	struct sge_eosw_desc *d;
+	struct tx_sw_desc *d;
 	struct sk_buff *skb;
 	u8 flits, ndesc;
 	int left;
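
With both ETHOFLD functions above switched over, the separate struct sge_eosw_desc is no longer needed and the generic struct tx_sw_desc is used everywhere. Its authoritative definition lives in cxgb4.h, outside this excerpt; judging only from the fields this diff touches (skb, plus the addr[] array handed to cxgb4_map_skb()/unmap_skb(), with addr[0] == 0 meaning nothing was mapped), it presumably reduces to roughly:

/* Presumed shape of the shared software Tx descriptor after this change
 * (sketch only; see cxgb4.h for the real definition).
 */
struct tx_sw_desc {
	struct sk_buff *skb;			/* skb to free on Tx completion */
	dma_addr_t addr[MAX_SKB_FRAGS + 1];	/* per-segment DMA addresses;
						 * addr[0] == 0 => nothing to unmap
						 */
};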