Skip to content

Commit f83331b

Browse files
Santosh Rastapur authored and davem330 committed
cxgb3: Check and handle the dma mapping errors
This patch adds checks at appropriate places whether *dma_map*() call has succeeded or not. Signed-off-by: Santosh Rastapur <[email protected]> Reviewed-by: Jay Fenlason <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 9313eb4 commit f83331b

File tree

1 file changed

+82
-23
lines changed
  • drivers/net/ethernet/chelsio/cxgb3

1 file changed

+82
-23
lines changed

drivers/net/ethernet/chelsio/cxgb3/sge.c

Lines changed: 82 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
455455
q->pg_chunk.offset = 0;
456456
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
457457
0, q->alloc_size, PCI_DMA_FROMDEVICE);
458+
if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
459+
__free_pages(q->pg_chunk.page, order);
460+
q->pg_chunk.page = NULL;
461+
return -EIO;
462+
}
458463
q->pg_chunk.mapping = mapping;
459464
}
460465
sd->pg_chunk = q->pg_chunk;
@@ -949,40 +954,75 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
949954
return flits_to_desc(flits);
950955
}
951956

957+
958+
/* map_skb - map a packet main body and its page fragments
959+
 * @pdev: the PCI device
960+
 * @skb: the packet
961+
 * @addr: placeholder to save the mapped addresses
962+
 *
963+
 * map the main body of an sk_buff and its page fragments, if any.
964+
 * Returns 0 on success or -ENOMEM if any mapping fails; on failure,
 * every mapping made so far is undone before returning.
 */
965+
static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
966+
dma_addr_t *addr)
967+
{
968+
const skb_frag_t *fp, *end;
969+
const struct skb_shared_info *si;
970+
971+
/* addr[0] receives the mapping of the linear (head) data */
*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
972+
PCI_DMA_TODEVICE);
973+
if (pci_dma_mapping_error(pdev, *addr))
974+
goto out_err;
975+
976+
si = skb_shinfo(skb);
977+
end = &si->frags[si->nr_frags];
978+
979+
/* addr[1..nr_frags] receive the page-fragment mappings, in order */
for (fp = si->frags; fp < end; fp++) {
980+
*++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
981+
DMA_TO_DEVICE);
982+
if (pci_dma_mapping_error(pdev, *addr))
983+
goto unwind;
984+
}
985+
return 0;
986+
987+
unwind:
988+
/* unmap the fragments mapped before the failing one, last first */
while (fp-- > si->frags)
989+
dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
990+
DMA_TO_DEVICE);
991+
992+
/* addr now points one past the head slot, so addr[-1] is the head mapping */
pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
993+
out_err:
994+
return -ENOMEM;
995+
}
996+
952997
/**
953-
* make_sgl - populate a scatter/gather list for a packet
998+
* write_sgl - populate a scatter/gather list for a packet
954999
* @skb: the packet
9551000
* @sgp: the SGL to populate
9561001
* @start: start address of skb main body data to include in the SGL
9571002
* @len: length of skb main body data to include in the SGL
958-
* @pdev: the PCI device
1003+
* @addr: the list of the mapped addresses
9591004
*
960-
* Generates a scatter/gather list for the buffers that make up a packet
1005+
* Copies the scatter/gather list for the buffers that make up a packet
9611006
* and returns the SGL size in 8-byte words. The caller must size the SGL
9621007
* appropriately.
9631008
*/
964-
static inline unsigned int make_sgl(const struct sk_buff *skb,
1009+
static inline unsigned int write_sgl(const struct sk_buff *skb,
9651010
struct sg_ent *sgp, unsigned char *start,
966-
unsigned int len, struct pci_dev *pdev)
1011+
unsigned int len, const dma_addr_t *addr)
9671012
{
968-
dma_addr_t mapping;
969-
unsigned int i, j = 0, nfrags;
1013+
unsigned int i, j = 0, k = 0, nfrags;
9701014

9711015
if (len) {
972-
mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
9731016
sgp->len[0] = cpu_to_be32(len);
974-
sgp->addr[0] = cpu_to_be64(mapping);
975-
j = 1;
1017+
sgp->addr[j++] = cpu_to_be64(addr[k++]);
9761018
}
9771019

9781020
nfrags = skb_shinfo(skb)->nr_frags;
9791021
for (i = 0; i < nfrags; i++) {
9801022
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9811023

982-
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
983-
DMA_TO_DEVICE);
9841024
sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
985-
sgp->addr[j] = cpu_to_be64(mapping);
1025+
sgp->addr[j] = cpu_to_be64(addr[k++]);
9861026
j ^= 1;
9871027
if (j == 0)
9881028
++sgp;
@@ -1138,7 +1178,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
11381178
const struct port_info *pi,
11391179
unsigned int pidx, unsigned int gen,
11401180
struct sge_txq *q, unsigned int ndesc,
1141-
unsigned int compl)
1181+
unsigned int compl, const dma_addr_t *addr)
11421182
{
11431183
unsigned int flits, sgl_flits, cntrl, tso_info;
11441184
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1196,7 +1236,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
11961236
}
11971237

11981238
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1199-
sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1239+
sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
12001240

12011241
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
12021242
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1227,6 +1267,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
12271267
struct netdev_queue *txq;
12281268
struct sge_qset *qs;
12291269
struct sge_txq *q;
1270+
dma_addr_t addr[MAX_SKB_FRAGS + 1];
12301271

12311272
/*
12321273
* The chip min packet length is 9 octets but play safe and reject
@@ -1255,6 +1296,11 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
12551296
return NETDEV_TX_BUSY;
12561297
}
12571298

1299+
if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1300+
dev_kfree_skb(skb);
1301+
return NETDEV_TX_OK;
1302+
}
1303+
12581304
q->in_use += ndesc;
12591305
if (unlikely(credits - ndesc < q->stop_thres)) {
12601306
t3_stop_tx_queue(txq, qs, q);
@@ -1312,7 +1358,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
13121358
if (likely(!skb_shared(skb)))
13131359
skb_orphan(skb);
13141360

1315-
write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1361+
write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
13161362
check_ring_tx_db(adap, q);
13171363
return NETDEV_TX_OK;
13181364
}
@@ -1578,7 +1624,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
15781624
*/
15791625
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
15801626
struct sge_txq *q, unsigned int pidx,
1581-
unsigned int gen, unsigned int ndesc)
1627+
unsigned int gen, unsigned int ndesc,
1628+
const dma_addr_t *addr)
15821629
{
15831630
unsigned int sgl_flits, flits;
15841631
struct work_request_hdr *from;
@@ -1599,9 +1646,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
15991646

16001647
flits = skb_transport_offset(skb) / 8;
16011648
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1602-
sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1649+
sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
16031650
skb->tail - skb->transport_header,
1604-
adap->pdev);
1651+
addr);
16051652
if (need_skb_unmap()) {
16061653
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
16071654
skb->destructor = deferred_unmap_destructor;
@@ -1659,6 +1706,11 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
16591706
goto again;
16601707
}
16611708

1709+
if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1710+
spin_unlock(&q->lock);
1711+
return NET_XMIT_SUCCESS;
1712+
}
1713+
16621714
gen = q->gen;
16631715
q->in_use += ndesc;
16641716
pidx = q->pidx;
@@ -1669,7 +1721,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
16691721
}
16701722
spin_unlock(&q->lock);
16711723

1672-
write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1724+
write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
16731725
check_ring_tx_db(adap, q);
16741726
return NET_XMIT_SUCCESS;
16751727
}
@@ -1687,6 +1739,7 @@ static void restart_offloadq(unsigned long data)
16871739
struct sge_txq *q = &qs->txq[TXQ_OFLD];
16881740
const struct port_info *pi = netdev_priv(qs->netdev);
16891741
struct adapter *adap = pi->adapter;
1742+
unsigned int written = 0;
16901743

16911744
spin_lock(&q->lock);
16921745
again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1706,18 +1759,23 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
17061759
break;
17071760
}
17081761

1762+
if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1763+
break;
1764+
17091765
gen = q->gen;
17101766
q->in_use += ndesc;
17111767
pidx = q->pidx;
17121768
q->pidx += ndesc;
1769+
written += ndesc;
17131770
if (q->pidx >= q->size) {
17141771
q->pidx -= q->size;
17151772
q->gen ^= 1;
17161773
}
17171774
__skb_unlink(skb, &q->sendq);
17181775
spin_unlock(&q->lock);
17191776

1720-
write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1777+
write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1778+
(dma_addr_t *)skb->head);
17211779
spin_lock(&q->lock);
17221780
}
17231781
spin_unlock(&q->lock);
@@ -1727,8 +1785,9 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
17271785
set_bit(TXQ_LAST_PKT_DB, &q->flags);
17281786
#endif
17291787
wmb();
1730-
t3_write_reg(adap, A_SG_KDOORBELL,
1731-
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1788+
if (likely(written))
1789+
t3_write_reg(adap, A_SG_KDOORBELL,
1790+
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
17321791
}
17331792

17341793
/**

0 commit comments

Comments
 (0)