@@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.offset = 0;
 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
 				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
+			__free_pages(q->pg_chunk.page, order);
+			q->pg_chunk.page = NULL;
+			return -EIO;
+		}
 		q->pg_chunk.mapping = mapping;
 	}
 	sd->pg_chunk = q->pg_chunk;
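Note: the hunk above follows the usual fail-and-release shape for a DMA mapping error: free the page that was just allocated, clear the cached pointer so the next attempt starts clean, and return -EIO. A minimal stand-alone sketch of that control flow (plain user-space C; every name below is a stand-in, not driver code):

#include <stdio.h>
#include <stdlib.h>

struct chunk {
	void *page;
	unsigned long mapping;
};

/* Stand-ins for pci_map_page()/pci_dma_mapping_error(); illustrative only. */
static unsigned long map_page(void *page)
{
	return (unsigned long)page;	/* pretend the bus address equals the CPU address */
}

static int mapping_error(unsigned long m)
{
	return m == 0;
}

static int alloc_chunk(struct chunk *q)
{
	q->page = malloc(4096);		/* mirrors alloc_pages() */
	if (!q->page)
		return -12;		/* -ENOMEM */

	q->mapping = map_page(q->page);
	if (mapping_error(q->mapping)) {
		free(q->page);		/* mirrors __free_pages(..., order) */
		q->page = NULL;		/* never leave a stale page pointer behind */
		return -5;		/* -EIO, as in the hunk above */
	}
	return 0;
}

int main(void)
{
	struct chunk q = { 0 };

	printf("alloc_chunk: %d\n", alloc_chunk(&q));
	free(q.page);
	return 0;
}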
@@ -949,40 +954,75 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
 	return flits_to_desc(flits);
 }
 
+
+/*	map_skb - map a packet main body and its page fragments
+ *	@pdev: the PCI device
+ *	@skb: the packet
+ *	@addr: placeholder to save the mapped addresses
+ *
+ *	map the main body of an sk_buff and its page fragments, if any.
+ */
+static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
+		   dma_addr_t *addr)
+{
+	const skb_frag_t *fp, *end;
+	const struct skb_shared_info *si;
+
+	*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
+			       PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, *addr))
+		goto out_err;
+
+	si = skb_shinfo(skb);
+	end = &si->frags[si->nr_frags];
+
+	for (fp = si->frags; fp < end; fp++) {
+		*++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
+					   DMA_TO_DEVICE);
+		if (pci_dma_mapping_error(pdev, *addr))
+			goto unwind;
+	}
+	return 0;
+
+unwind:
+	while (fp-- > si->frags)
+		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
+			       DMA_TO_DEVICE);
+
+	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
+out_err:
+	return -ENOMEM;
+}
+
 /**
- *	make_sgl - populate a scatter/gather list for a packet
+ *	write_sgl - populate a scatter/gather list for a packet
  *	@skb: the packet
  *	@sgp: the SGL to populate
  *	@start: start address of skb main body data to include in the SGL
  *	@len: length of skb main body data to include in the SGL
- *	@pdev: the PCI device
+ *	@addr: the list of the mapped addresses
  *
- *	Generates a scatter/gather list for the buffers that make up a packet
+ *	Copies the scatter/gather list for the buffers that make up a packet
  *	and returns the SGL size in 8-byte words.  The caller must size the SGL
  *	appropriately.
  */
-static inline unsigned int make_sgl(const struct sk_buff *skb,
+static inline unsigned int write_sgl(const struct sk_buff *skb,
 				    struct sg_ent *sgp, unsigned char *start,
-				    unsigned int len, struct pci_dev *pdev)
+				    unsigned int len, const dma_addr_t *addr)
 {
-	dma_addr_t mapping;
-	unsigned int i, j = 0, nfrags;
+	unsigned int i, j = 0, k = 0, nfrags;
 
 	if (len) {
-		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
 		sgp->len[0] = cpu_to_be32(len);
-		sgp->addr[0] = cpu_to_be64(mapping);
-		j = 1;
+		sgp->addr[j++] = cpu_to_be64(addr[k++]);
 	}
 
 	nfrags = skb_shinfo(skb)->nr_frags;
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
-					   DMA_TO_DEVICE);
 		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
-		sgp->addr[j] = cpu_to_be64(mapping);
+		sgp->addr[j] = cpu_to_be64(addr[k++]);
 		j ^= 1;
 		if (j == 0)
 			++sgp;
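Note: the subtle part of map_skb() is the unwind path. addr is advanced once per fragment, so on a mid-loop failure `while (fp-- > si->frags)` walks back over exactly the fragments already mapped via `*--addr`, after which `addr[-1]` is the head mapping for pci_unmap_single(). The stand-alone sketch below reproduces that pointer arithmetic with fake mappings (illustrative user-space C, not kernel code):

#include <stdio.h>

enum { NFRAGS = 4 };

/* Fake mapper: returns nonzero to simulate pci_dma_mapping_error(). */
static int map_one(int idx, int fail_at, unsigned long *out)
{
	if (idx == fail_at)
		return -1;
	*out = 0x1000 + idx;	/* fake bus address */
	return 0;
}

/*
 * Mirrors map_skb(): addr[0] is the head mapping, addr[1..nfrags] the
 * fragments.  On a mid-loop failure, step back over exactly the
 * fragments already mapped, then undo the head via addr[-1].
 */
static int map_all(unsigned long *addr, int nfrags, int fail_at)
{
	int fp = 0;

	if (map_one(-1, fail_at, addr))		/* head, like pci_map_single() */
		return -12;			/* -ENOMEM */

	for (fp = 0; fp < nfrags; fp++)
		if (map_one(fp, fail_at, ++addr))
			goto unwind;
	return 0;

unwind:
	while (fp-- > 0)			/* same shape as fp-- > si->frags */
		printf("unmap frag %d (0x%lx)\n", fp, *--addr);
	printf("unmap head (0x%lx)\n", addr[-1]);
	return -12;
}

int main(void)
{
	unsigned long addr[NFRAGS + 1];

	map_all(addr, NFRAGS, 2);		/* fragment 2 fails to map */
	return 0;
}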
@@ -1138,7 +1178,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 			    const struct port_info *pi,
 			    unsigned int pidx, unsigned int gen,
 			    struct sge_txq *q, unsigned int ndesc,
-			    unsigned int compl)
+			    unsigned int compl, const dma_addr_t *addr)
 {
 	unsigned int flits, sgl_flits, cntrl, tso_info;
 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1196,7 +1236,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 	}
 
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
+	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
 
 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1227,6 +1267,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_queue *txq;
 	struct sge_qset *qs;
 	struct sge_txq *q;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 
 	/*
 	 * The chip min packet length is 9 octets but play safe and reject
@@ -1255,6 +1296,11 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
+	if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
 	q->in_use += ndesc;
 	if (unlikely(credits - ndesc < q->stop_thres)) {
 		t3_stop_tx_queue(txq, qs, q);
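Note: on a mapping failure t3_eth_xmit() frees the skb and returns NETDEV_TX_OK rather than NETDEV_TX_BUSY; BUSY would make the stack requeue a packet that is unlikely to map any better on retry, while OK reports it consumed. The addr[] array declared earlier holds one bus address for the linear header plus one per page fragment, hence MAX_SKB_FRAGS + 1 entries. A minimal sketch of the drop-on-error decision (user-space C, illustrative names):

#include <stdio.h>
#include <stdlib.h>

enum { TX_OK = 0, TX_BUSY = 1 };

struct pkt { void *data; };

/* Stand-in for map_skb(); nonzero return simulates a mapping failure. */
static int map_pkt(struct pkt *p, int fail)
{
	(void)p;
	return fail ? -12 : 0;
}

/*
 * Mirrors the hunk above: a packet whose buffers cannot be DMA-mapped is
 * consumed (freed) and reported as sent; TX_BUSY stays reserved for
 * conditions that will clear, such as a full ring.
 */
static int xmit(struct pkt *p, int fail)
{
	if (map_pkt(p, fail) < 0) {
		free(p->data);		/* like dev_kfree_skb(skb) */
		p->data = NULL;
		return TX_OK;		/* consumed, not requeued */
	}
	/* ... post descriptors and ring the doorbell ... */
	free(p->data);
	p->data = NULL;
	return TX_OK;
}

int main(void)
{
	struct pkt p = { malloc(64) };

	printf("xmit: %d\n", xmit(&p, 1));
	return 0;
}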
@@ -1312,7 +1358,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!skb_shared(skb)))
 		skb_orphan(skb);
 
-	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
+	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
 	check_ring_tx_db(adap, q);
 	return NETDEV_TX_OK;
 }
@@ -1578,7 +1624,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
  */
 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 			  struct sge_txq *q, unsigned int pidx,
-			  unsigned int gen, unsigned int ndesc)
+			  unsigned int gen, unsigned int ndesc,
+			  const dma_addr_t *addr)
 {
 	unsigned int sgl_flits, flits;
 	struct work_request_hdr *from;
@@ -1599,9 +1646,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 
 	flits = skb_transport_offset(skb) / 8;
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
 			     skb->tail - skb->transport_header,
-			     adap->pdev);
+			     addr);
 	if (need_skb_unmap()) {
 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
 		skb->destructor = deferred_unmap_destructor;
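Note: write_ofld_wr() keeps the deferred-unmapping machinery: the SGL is now filled from pre-mapped addresses, but setup_deferred_unmapping() and deferred_unmap_destructor still postpone the actual unmap until the skb is freed, i.e. after the hardware has consumed the buffers. A stand-alone sketch of that destructor-hook pattern (illustrative; the real helpers record the SGL entries to unmap):

#include <stdio.h>
#include <stdlib.h>

/* An owner object whose cleanup hook runs only at final free time. */
struct buf {
	void (*destructor)(struct buf *);
	unsigned long mapping;		/* recorded when the request is posted */
};

static void deferred_unmap(struct buf *b)
{
	printf("unmap 0x%lx at free time\n", b->mapping);
}

static void buf_free(struct buf *b)
{
	if (b->destructor)
		b->destructor(b);	/* hardware is done with the buffer by now */
	free(b);
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->mapping = 0x2000;
	b->destructor = deferred_unmap;	/* like skb->destructor = deferred_unmap_destructor */
	buf_free(b);			/* cleanup deferred to the final free */
	return 0;
}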
@@ -1659,6 +1706,11 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 		goto again;
 	}
 
+	if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
+		spin_unlock(&q->lock);
+		return NET_XMIT_SUCCESS;
+	}
+
 	gen = q->gen;
 	q->in_use += ndesc;
 	pidx = q->pidx;
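Note: the offload paths pass (dma_addr_t *)skb->head instead of a stack array, which appears to reuse the start of the skb's own buffer as scratch storage for the mapped addresses; that only works while the driver owns the skb and nothing else needs those bytes. A stand-alone sketch of the buffer-as-scratch idea (illustrative only):

#include <stdio.h>

/*
 * A buffer the driver owns outright can have its first bytes
 * reinterpreted as an array of bus addresses.  The union keeps the
 * aliasing and alignment well defined in this sketch.
 */
union scratch {
	unsigned char bytes[256];	/* stands in for the skb buffer at skb->head */
	unsigned long addrs[256 / sizeof(unsigned long)];
};

int main(void)
{
	union scratch s;

	s.addrs[0] = 0xd0000000;	/* head mapping */
	s.addrs[1] = 0xd0001000;	/* fragment 0 mapping */
	printf("scratch: 0x%lx 0x%lx\n", s.addrs[0], s.addrs[1]);
	return 0;
}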
@@ -1669,7 +1721,7 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 	}
 	spin_unlock(&q->lock);
 
-	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
 	check_ring_tx_db(adap, q);
 	return NET_XMIT_SUCCESS;
 }
@@ -1687,6 +1739,7 @@ static void restart_offloadq(unsigned long data)
 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
 	const struct port_info *pi = netdev_priv(qs->netdev);
 	struct adapter *adap = pi->adapter;
+	unsigned int written = 0;
 
 	spin_lock(&q->lock);
 again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1706,18 +1759,23 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 			break;
 		}
 
+		if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
+			break;
+
 		gen = q->gen;
 		q->in_use += ndesc;
 		pidx = q->pidx;
 		q->pidx += ndesc;
+		written += ndesc;
 		if (q->pidx >= q->size) {
 			q->pidx -= q->size;
 			q->gen ^= 1;
 		}
 		__skb_unlink(skb, &q->sendq);
 		spin_unlock(&q->lock);
 
-		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
+			      (dma_addr_t *)skb->head);
 		spin_lock(&q->lock);
 	}
 	spin_unlock(&q->lock);
@@ -1727,8 +1785,9 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
 	wmb();
-	t3_write_reg(adap, A_SG_KDOORBELL,
-		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+	if (likely(written))
+		t3_write_reg(adap, A_SG_KDOORBELL,
+			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
 
 /**
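Note: because map_skb() can now make restart_offloadq() stop mid-loop, `written` counts the descriptors actually posted and the A_SG_KDOORBELL write is skipped when it is zero, so the hardware is never told about work that was not queued. A stand-alone sketch of the count-then-notify pattern (illustrative):

#include <stdio.h>

/* Stand-in for posting one work request; nonzero simulates map_skb() failing. */
static int post_one(int i)
{
	return i < 3 ? 0 : -1;		/* pretend the fourth packet cannot be mapped */
}

static void ring_doorbell(unsigned int n)	/* like t3_write_reg(adap, A_SG_KDOORBELL, ...) */
{
	printf("doorbell rung after %u descriptors\n", n);
}

int main(void)
{
	unsigned int written = 0;
	int i;

	for (i = 0; i < 8; i++) {
		if (post_one(i))
			break;		/* bail out mid-loop, as the hunk does */
		written++;		/* like written += ndesc */
	}

	if (written)			/* never notify hardware about zero work */
		ring_doorbell(written);
	return 0;
}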
0 commit comments