Commit 728e2cc

aik authored and davem330 committed
Revert "cxgb3: Check and handle the dma mapping errors"
This reverts commit f83331b. As the tests PPC64 (powernv platform) show, IOMMU pages are leaking when transferring big amount of small packets (<=64 bytes), "ping -f" and waiting for 15 seconds is the simplest way to confirm the bug. Cc: Linus Torvalds <[email protected]> Cc: Santosh Rastapur <[email protected]> Cc: Jay Fenlason <[email protected]> Cc: David S. Miller <[email protected]> Cc: Divy Le ray <[email protected]> Signed-off-by: Alexey Kardashevskiy <[email protected]> Acked-by: Divy Le Ray <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 3da988c commit 728e2cc
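For context, the checking that this revert removes follows the usual map-then-verify DMA pattern. The sketch below is illustrative only, not code from sge.c: the function name example_map_head and its parameters are made up, and it assumes the legacy PCI DMA API (pci_map_single, pci_dma_mapping_error) that cxgb3 used at the time.

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Illustrative only -- not taken from sge.c. */
static int example_map_head(struct pci_dev *pdev, struct sk_buff *skb,
                            dma_addr_t *addr)
{
        /* Map the linear part of the skb for device reads. */
        *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
                               PCI_DMA_TODEVICE);

        /* On IOMMU platforms the mapping can fail; report the failure so
         * the caller can drop the packet instead of handing the hardware
         * a bogus bus address. */
        if (pci_dma_mapping_error(pdev, *addr))
                return -ENOMEM;

        return 0;
}

As the commit message notes, the extra mapping bookkeeping this introduced in the cxgb3 TX path leaked IOMMU pages on PPC64 under heavy small-packet traffic, so the revert restores the older behaviour of mapping buffers inline in make_sgl without the error check.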

drivers/net/ethernet/chelsio/cxgb3/sge.c

Lines changed: 24 additions & 83 deletions
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
                 q->pg_chunk.offset = 0;
                 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
                                        0, q->alloc_size, PCI_DMA_FROMDEVICE);
-                if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
-                        __free_pages(q->pg_chunk.page, order);
-                        q->pg_chunk.page = NULL;
-                        return -EIO;
-                }
                 q->pg_chunk.mapping = mapping;
         }
         sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
         return flits_to_desc(flits);
 }
 
-
-/* map_skb - map a packet main body and its page fragments
- * @pdev: the PCI device
- * @skb: the packet
- * @addr: placeholder to save the mapped addresses
- *
- * map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
-                   dma_addr_t *addr)
-{
-        const skb_frag_t *fp, *end;
-        const struct skb_shared_info *si;
-
-        *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
-                               PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(pdev, *addr))
-                goto out_err;
-
-        si = skb_shinfo(skb);
-        end = &si->frags[si->nr_frags];
-
-        for (fp = si->frags; fp < end; fp++) {
-                *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
-                                           DMA_TO_DEVICE);
-                if (pci_dma_mapping_error(pdev, *addr))
-                        goto unwind;
-        }
-        return 0;
-
-unwind:
-        while (fp-- > si->frags)
-                dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
-                               DMA_TO_DEVICE);
-
-        pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
-        return -ENOMEM;
-}
-
 /**
- * write_sgl - populate a scatter/gather list for a packet
+ * make_sgl - populate a scatter/gather list for a packet
  * @skb: the packet
  * @sgp: the SGL to populate
  * @start: start address of skb main body data to include in the SGL
  * @len: length of skb main body data to include in the SGL
- * @addr: the list of the mapped addresses
+ * @pdev: the PCI device
  *
- * Copies the scatter/gather list for the buffers that make up a packet
+ * Generates a scatter/gather list for the buffers that make up a packet
  * and returns the SGL size in 8-byte words. The caller must size the SGL
  * appropriately.
  */
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
                                      struct sg_ent *sgp, unsigned char *start,
-                                     unsigned int len, const dma_addr_t *addr)
+                                     unsigned int len, struct pci_dev *pdev)
 {
-        unsigned int i, j = 0, k = 0, nfrags;
+        dma_addr_t mapping;
+        unsigned int i, j = 0, nfrags;
 
         if (len) {
+                mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
                 sgp->len[0] = cpu_to_be32(len);
-                sgp->addr[j++] = cpu_to_be64(addr[k++]);
+                sgp->addr[0] = cpu_to_be64(mapping);
+                j = 1;
         }
 
         nfrags = skb_shinfo(skb)->nr_frags;
         for (i = 0; i < nfrags; i++) {
                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+                                           DMA_TO_DEVICE);
                 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
-                sgp->addr[j] = cpu_to_be64(addr[k++]);
+                sgp->addr[j] = cpu_to_be64(mapping);
                 j ^= 1;
                 if (j == 0)
                         ++sgp;
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                             const struct port_info *pi,
                             unsigned int pidx, unsigned int gen,
                             struct sge_txq *q, unsigned int ndesc,
-                            unsigned int compl, const dma_addr_t *addr)
+                            unsigned int compl)
 {
         unsigned int flits, sgl_flits, cntrl, tso_info;
         struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
         }
 
         sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-        sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+        sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
 
         write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
                          htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         struct netdev_queue *txq;
         struct sge_qset *qs;
         struct sge_txq *q;
-        dma_addr_t addr[MAX_SKB_FRAGS + 1];
 
         /*
          * The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                 return NETDEV_TX_BUSY;
         }
 
-        if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
-                dev_kfree_skb(skb);
-                return NETDEV_TX_OK;
-        }
-
         q->in_use += ndesc;
         if (unlikely(credits - ndesc < q->stop_thres)) {
                 t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         if (likely(!skb_shared(skb)))
                 skb_orphan(skb);
 
-        write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+        write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
         check_ring_tx_db(adap, q);
         return NETDEV_TX_OK;
 }
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
  */
 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
                           struct sge_txq *q, unsigned int pidx,
-                          unsigned int gen, unsigned int ndesc,
-                          const dma_addr_t *addr)
+                          unsigned int gen, unsigned int ndesc)
 {
         unsigned int sgl_flits, flits;
         struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 
         flits = skb_transport_offset(skb) / 8;
         sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-        sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
-                              skb_tail_pointer(skb) -
-                              skb_transport_header(skb), addr);
+        sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+                             skb->tail - skb->transport_header,
+                             adap->pdev);
         if (need_skb_unmap()) {
                 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
                 skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again:  reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                 goto again;
         }
 
-        if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
-                spin_unlock(&q->lock);
-                return NET_XMIT_SUCCESS;
-        }
-
         gen = q->gen;
         q->in_use += ndesc;
         pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again:  reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
         }
         spin_unlock(&q->lock);
 
-        write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+        write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
         check_ring_tx_db(adap, q);
         return NET_XMIT_SUCCESS;
 }
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
         struct sge_txq *q = &qs->txq[TXQ_OFLD];
         const struct port_info *pi = netdev_priv(qs->netdev);
         struct adapter *adap = pi->adapter;
-        unsigned int written = 0;
 
         spin_lock(&q->lock);
 again:  reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,23 +1705,18 @@ again:  reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                         break;
                 }
 
-                if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
-                        break;
-
                 gen = q->gen;
                 q->in_use += ndesc;
                 pidx = q->pidx;
                 q->pidx += ndesc;
-                written += ndesc;
                 if (q->pidx >= q->size) {
                         q->pidx -= q->size;
                         q->gen ^= 1;
                 }
                 __skb_unlink(skb, &q->sendq);
                 spin_unlock(&q->lock);
 
-                write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
-                              (dma_addr_t *)skb->head);
+                write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
                 spin_lock(&q->lock);
         }
         spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again:  reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
                 set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
         wmb();
-        if (likely(written))
-                t3_write_reg(adap, A_SG_KDOORBELL,
-                             F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+        t3_write_reg(adap, A_SG_KDOORBELL,
+                     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
 
 /**
