Commit af6f479

Wei Fang authored and davem330 committed
net: fec: improve XDP_TX performance
As suggested by Jesper and Alexander, we can avoid converting the xdp_buff to an xdp_frame in the XDP_TX case and save a bunch of CPU cycles, further improving XDP_TX performance.

Before this patch, the performance on the i.MX8MP-EVK board is as follows:

root@imx8mpevk:~# ./xdp2 eth0
proto 17: 353918 pkt/s
proto 17: 352923 pkt/s
proto 17: 353900 pkt/s
proto 17: 352672 pkt/s
proto 17: 353912 pkt/s
proto 17: 354219 pkt/s

After applying this patch, the performance is improved:

root@imx8mpevk:~# ./xdp2 eth0
proto 17: 369261 pkt/s
proto 17: 369267 pkt/s
proto 17: 369206 pkt/s
proto 17: 369214 pkt/s
proto 17: 369126 pkt/s
proto 17: 369272 pkt/s

Signed-off-by: Wei Fang <[email protected]>
Suggested-by: Alexander Lobakin <[email protected]>
Suggested-by: Jesper Dangaard Brouer <[email protected]>
Reviewed-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
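In short: the XDP_TX path now stores the page-pool page backing the xdp_buff in tx_buf[].buf_p instead of first converting the buffer to an xdp_frame, and the TX completion path returns that page directly to its page pool. Below is a minimal, illustrative sketch of that idea; the sketch_* helper names are invented for illustration and the sketch assumes the driver's existing types and helpers (struct fec_enet_priv_tx_q, struct bufdesc, cpu_to_fec16/32, FEC_TXBUF_T_XDP_TX) — the real logic lives inline in fec_enet_txq_xmit_frame() and fec_enet_tx_queue(), shown in the diff below.

/* Condensed sketch, not the literal driver code: XDP_TX keeps the xdp_buff
 * as-is, remembers only the backing page, and recycles it on completion.
 */
static void sketch_xdp_tx_fill_desc(struct fec_enet_priv_tx_q *txq, int index,
				    struct bufdesc *bdp, struct xdp_buff *xdpb,
				    u32 dma_sync_len, struct device *dev)
{
	struct page *page = virt_to_page(xdpb->data);
	dma_addr_t dma_addr = page_pool_get_dma_addr(page) +
			      (xdpb->data - xdpb->data_hard_start);

	/* The page is already DMA-mapped by the page pool; only the used
	 * region needs to be synced for the device.
	 */
	dma_sync_single_for_device(dev, dma_addr, dma_sync_len,
				   DMA_BIDIRECTIONAL);

	txq->tx_buf[index].buf_p = page;	/* no xdp_frame conversion */
	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;

	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
	bdp->cbd_datlen = cpu_to_fec16(xdpb->data_end - xdpb->data);
}

/* On TX completion the page goes straight back to its pool; dma_sync_size
 * is 0 because the buffer was already synced for the device above.
 */
static void sketch_xdp_tx_complete(struct page *page)
{
	page_pool_put_page(page->pp, page, 0, true);
}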
1 parent f601899 · commit af6f479

File tree

2 files changed: +75 -70 lines changed


drivers/net/ethernet/freescale/fec.h

Lines changed: 1 addition & 4 deletions
@@ -552,10 +552,7 @@ enum fec_txbuf_type {
 };
 
 struct fec_tx_buffer {
-	union {
-		struct sk_buff *skb;
-		struct xdp_frame *xdp;
-	};
+	void *buf_p;
 	enum fec_txbuf_type type;
 };
 

drivers/net/ethernet/freescale/fec_main.c

Lines changed: 74 additions & 66 deletions
@@ -400,7 +400,7 @@ static void fec_dump(struct net_device *ndev)
 			 fec16_to_cpu(bdp->cbd_sc),
 			 fec32_to_cpu(bdp->cbd_bufaddr),
 			 fec16_to_cpu(bdp->cbd_datlen),
-			 txq->tx_buf[index].skb);
+			 txq->tx_buf[index].buf_p);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		index++;
 	} while (bdp != txq->bd.base);
@@ -657,7 +657,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 	/* Save skb pointer */
-	txq->tx_buf[index].skb = skb;
+	txq->tx_buf[index].buf_p = skb;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -863,7 +863,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Save skb pointer */
-	txq->tx_buf[index].skb = skb;
+	txq->tx_buf[index].buf_p = skb;
 
 	skb_tx_timestamp(skb);
 	txq->bd.cur = bdp;
@@ -960,27 +960,27 @@ static void fec_enet_bd_init(struct net_device *dev)
 						 fec32_to_cpu(bdp->cbd_bufaddr),
 						 fec16_to_cpu(bdp->cbd_datlen),
 						 DMA_TO_DEVICE);
-			if (txq->tx_buf[i].skb) {
-				dev_kfree_skb_any(txq->tx_buf[i].skb);
-				txq->tx_buf[i].skb = NULL;
-			}
-		} else {
-			if (bdp->cbd_bufaddr &&
-			    txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO)
+			if (txq->tx_buf[i].buf_p)
+				dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+		} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+			if (bdp->cbd_bufaddr)
 				dma_unmap_single(&fep->pdev->dev,
 						 fec32_to_cpu(bdp->cbd_bufaddr),
 						 fec16_to_cpu(bdp->cbd_datlen),
 						 DMA_TO_DEVICE);
 
-			if (txq->tx_buf[i].xdp) {
-				xdp_return_frame(txq->tx_buf[i].xdp);
-				txq->tx_buf[i].xdp = NULL;
-			}
+			if (txq->tx_buf[i].buf_p)
+				xdp_return_frame(txq->tx_buf[i].buf_p);
+		} else {
+			struct page *page = txq->tx_buf[i].buf_p;
 
-			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
-			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+			if (page)
+				page_pool_put_page(page->pp, page, 0, false);
 		}
 
+		txq->tx_buf[i].buf_p = NULL;
+		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+		txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 		bdp->cbd_bufaddr = cpu_to_fec32(0);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 	}
@@ -1387,6 +1387,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 	struct netdev_queue *nq;
 	int	index = 0;
 	int	entries_free;
+	struct page *page;
+	int	frame_len;
 
 	fep = netdev_priv(ndev);
 
@@ -1408,8 +1410,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 
 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
-			skb = txq->tx_buf[index].skb;
-			txq->tx_buf[index].skb = NULL;
+			skb = txq->tx_buf[index].buf_p;
 			if (bdp->cbd_bufaddr &&
 			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
 				dma_unmap_single(&fep->pdev->dev,
@@ -1428,18 +1429,24 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			if (unlikely(!budget))
 				break;
 
-			xdpf = txq->tx_buf[index].xdp;
-			if (bdp->cbd_bufaddr &&
-			    txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO)
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+				xdpf = txq->tx_buf[index].buf_p;
+				if (bdp->cbd_bufaddr)
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+			} else {
+				page = txq->tx_buf[index].buf_p;
+			}
+
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
-			if (unlikely(!xdpf)) {
+			if (unlikely(!txq->tx_buf[index].buf_p)) {
 				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
 				goto tx_buf_done;
 			}
+
+			frame_len = fec16_to_cpu(bdp->cbd_datlen);
 		}
 
 		/* Check for errors. */
@@ -1463,7 +1470,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
 				ndev->stats.tx_bytes += skb->len;
 			else
-				ndev->stats.tx_bytes += xdpf->len;
+				ndev->stats.tx_bytes += frame_len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1488,23 +1495,17 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 
 			/* Free the sk buffer associated with this last transmit */
 			dev_kfree_skb_any(skb);
-		} else {
-			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
-				xdp_return_frame_rx_napi(xdpf);
-			} else { /* recycle pages of XDP_TX frames */
-				struct page *page = virt_to_head_page(xdpf->data);
-
-				/* The dma_sync_size = 0 as XDP_TX has already
-				 * synced DMA for_device.
-				 */
-				page_pool_put_page(page->pp, page, 0, true);
-			}
-
-			txq->tx_buf[index].xdp = NULL;
-			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
-			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+			xdp_return_frame_rx_napi(xdpf);
+		} else { /* recycle pages of XDP_TX frames */
+			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
+			page_pool_put_page(page->pp, page, 0, true);
 		}
 
+		txq->tx_buf[index].buf_p = NULL;
+		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+
 tx_buf_done:
 		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
@@ -3234,7 +3235,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	unsigned int i;
-	struct sk_buff *skb;
 	struct fec_enet_priv_tx_q *txq;
 	struct fec_enet_priv_rx_q *rxq;
 	unsigned int q;
@@ -3259,18 +3259,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
 
+			if (!txq->tx_buf[i].buf_p) {
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+				continue;
+			}
+
 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
-				skb = txq->tx_buf[i].skb;
-				txq->tx_buf[i].skb = NULL;
-				dev_kfree_skb(skb);
+				dev_kfree_skb(txq->tx_buf[i].buf_p);
+			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+				xdp_return_frame(txq->tx_buf[i].buf_p);
 			} else {
-				if (txq->tx_buf[i].xdp) {
-					xdp_return_frame(txq->tx_buf[i].xdp);
-					txq->tx_buf[i].xdp = NULL;
-				}
+				struct page *page = txq->tx_buf[i].buf_p;
 
-				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+				page_pool_put_page(page->pp, page, 0, false);
 			}
+
+			txq->tx_buf[i].buf_p = NULL;
+			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 		}
 	}
 }
@@ -3793,13 +3798,14 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
 
 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 				   struct fec_enet_priv_tx_q *txq,
-				   struct xdp_frame *frame,
-				   u32 dma_sync_len, bool ndo_xmit)
+				   void *frame, u32 dma_sync_len,
+				   bool ndo_xmit)
 {
 	unsigned int index, status, estatus;
 	struct bufdesc *bdp;
 	dma_addr_t dma_addr;
 	int entries_free;
+	u16 frame_len;
 
 	entries_free = fec_enet_get_free_txdesc_num(txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -3815,30 +3821,36 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	index = fec_enet_get_bd_index(bdp, &txq->bd);
 
 	if (ndo_xmit) {
-		dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
-					  frame->len, DMA_TO_DEVICE);
+		struct xdp_frame *xdpf = frame;
+
+		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
+					  xdpf->len, DMA_TO_DEVICE);
 		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
 			return -ENOMEM;
 
+		frame_len = xdpf->len;
+		txq->tx_buf[index].buf_p = xdpf;
 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
 	} else {
-		struct page *page = virt_to_page(frame->data);
+		struct xdp_buff *xdpb = frame;
+		struct page *page;
 
-		dma_addr = page_pool_get_dma_addr(page) + sizeof(*frame) +
-			   frame->headroom;
+		page = virt_to_page(xdpb->data);
+		dma_addr = page_pool_get_dma_addr(page) +
+			   (xdpb->data - xdpb->data_hard_start);
 		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
 					   dma_sync_len, DMA_BIDIRECTIONAL);
+		frame_len = xdpb->data_end - xdpb->data;
+		txq->tx_buf[index].buf_p = page;
 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
 	}
 
-	txq->tx_buf[index].xdp = frame;
-
 	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
 	if (fep->bufdesc_ex)
 		estatus = BD_ENET_TX_INT;
 
 	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
-	bdp->cbd_datlen = cpu_to_fec16(frame->len);
+	bdp->cbd_datlen = cpu_to_fec16(frame_len);
 
 	if (fep->bufdesc_ex) {
 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
@@ -3879,14 +3891,10 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
 				int cpu, struct xdp_buff *xdp,
 				u32 dma_sync_len)
 {
-	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 	struct fec_enet_priv_tx_q *txq;
 	struct netdev_queue *nq;
 	int queue, ret;
 
-	if (unlikely(!xdpf))
-		return -EFAULT;
-
 	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
 	txq = fep->tx_queue[queue];
 	nq = netdev_get_tx_queue(fep->netdev, queue);
@@ -3895,7 +3903,7 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
 
 	/* Avoid tx timeout as XDP shares the queue with kernel stack */
 	txq_trans_cond_update(nq);
-	ret = fec_enet_txq_xmit_frame(fep, txq, xdpf, dma_sync_len, false);
+	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
 
 	__netif_tx_unlock(nq);
 