
Commit 34c2e4c

John Crispin authored and davem330 committed
net: mediatek: fix TX locking
Inside the TX path there is a lock inside the tx_map function. This is, however, too late. The patch moves the lock to the start of the xmit function, right before the free-count check of the DMA ring happens. If we do not do this, the code becomes racy, leading to TX stalls and dropped packets. This happens because there are 2 netdevs running on the same physical DMA ring.

Signed-off-by: John Crispin <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 13c822f commit 34c2e4c
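
To see why the lock has to cover the free-count check, here is a minimal standalone C sketch of the pattern (userspace, pthread-based; tx_ring, free_count and the function names are hypothetical stand-ins for the driver's ring and eth->page_lock, not its real API):

    /* Sketch of the race the commit fixes: if the free-count check sits
     * outside the lock, two senders sharing one ring can both pass the
     * check and oversubscribe the ring.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct tx_ring {
            pthread_mutex_t lock;   /* stands in for eth->page_lock */
            int free_count;         /* stands in for ring->free_count */
    };

    /* Racy (old) shape: check outside the lock, reserve inside. */
    static int xmit_racy(struct tx_ring *r, int tx_num)
    {
            if (r->free_count <= tx_num)    /* a second sender can also pass here */
                    return -1;              /* NETDEV_TX_BUSY analogue */
            pthread_mutex_lock(&r->lock);
            r->free_count -= tx_num;        /* both senders reserve: ring oversubscribed */
            pthread_mutex_unlock(&r->lock);
            return 0;
    }

    /* Fixed shape: lock first, so check + reserve are atomic, and
     * unlock on every exit path (busy, success).
     */
    static int xmit_fixed(struct tx_ring *r, int tx_num)
    {
            pthread_mutex_lock(&r->lock);
            if (r->free_count <= tx_num) {
                    pthread_mutex_unlock(&r->lock);
                    return -1;
            }
            r->free_count -= tx_num;
            pthread_mutex_unlock(&r->lock);
            return 0;
    }

    int main(void)
    {
            struct tx_ring r = { PTHREAD_MUTEX_INITIALIZER, 4 };

            /* Two "netdevs" each ask for 3 descriptors; only one can fit. */
            printf("first:  %d\n", xmit_fixed(&r, 3));  /* 0: reserved */
            printf("second: %d\n", xmit_fixed(&r, 3));  /* -1: ring full */
            printf("free_count: %d\n", r.free_count);
            return 0;
    }

Taking the lock before the check makes check-and-reserve atomic, and every exit path releases it, mirroring the spin_unlock_irqrestore() calls the patch adds on the busy, drop, and success paths below.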

File tree

1 file changed (+10, -10)

drivers/net/ethernet/mediatek/mtk_eth_soc.c

Lines changed: 10 additions & 10 deletions
@@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
         struct mtk_eth *eth = mac->hw;
         struct mtk_tx_dma *itxd, *txd;
         struct mtk_tx_buf *tx_buf;
-        unsigned long flags;
         dma_addr_t mapped_addr;
         unsigned int nr_frags;
         int i, n_desc = 1;
@@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
         if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
                 return -ENOMEM;
 
-        /* normally we can rely on the stack not calling this more than once,
-         * however we have 2 queues running ont he same ring so we need to lock
-         * the ring access
-         */
-        spin_lock_irqsave(&eth->page_lock, flags);
         WRITE_ONCE(itxd->txd1, mapped_addr);
         tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
         dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -632,8 +626,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
         WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
                                 (!nr_frags * TX_DMA_LS0)));
 
-        spin_unlock_irqrestore(&eth->page_lock, flags);
-
         netdev_sent_queue(dev, skb->len);
         skb_tx_timestamp(skb);
 
@@ -661,8 +653,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
         } while (itxd != txd);
 
-        spin_unlock_irqrestore(&eth->page_lock, flags);
-
         return -ENOMEM;
 }
 
@@ -712,14 +702,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
         struct mtk_eth *eth = mac->hw;
         struct mtk_tx_ring *ring = &eth->tx_ring;
         struct net_device_stats *stats = &dev->stats;
+        unsigned long flags;
         bool gso = false;
         int tx_num;
 
+        /* normally we can rely on the stack not calling this more than once,
+         * however we have 2 queues running on the same ring so we need to lock
+         * the ring access
+         */
+        spin_lock_irqsave(&eth->page_lock, flags);
+
         tx_num = mtk_cal_txd_req(skb);
         if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
                 mtk_stop_queue(eth);
                 netif_err(eth, tx_queued, dev,
                           "Tx Ring full when queue awake!\n");
+                spin_unlock_irqrestore(&eth->page_lock, flags);
                 return NETDEV_TX_BUSY;
         }
 
@@ -747,10 +745,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
                               ring->thresh))
                         mtk_wake_queue(eth);
         }
+        spin_unlock_irqrestore(&eth->page_lock, flags);
 
         return NETDEV_TX_OK;
 
 drop:
+        spin_unlock_irqrestore(&eth->page_lock, flags);
         stats->tx_dropped++;
         dev_kfree_skb(skb);
         return NETDEV_TX_OK;
