Commit 78c53ea

Merge branch 'fec-XDP_TX'

Wei Fang says:

====================
net: fec: add XDP_TX feature support

This patch set adds XDP_TX support to the FEC driver. The first patch
adds the initial XDP_TX support, and the second patch improves XDP_TX
performance by not using xdp_convert_buff_to_frame(). Please refer to
the commit message of each patch for more details.
====================

Acked-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>

2 parents e56e220 + af6f479 commit 78c53ea
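For context: XDP_TX is the verdict an XDP program returns to bounce the received frame back out the same interface, reusing the RX buffer for transmit. A minimal sketch of a program exercising the new path (hypothetical example, not part of this commit; a real program would normally rewrite the Ethernet header before reflecting):

/* xdp_reflect.c - hypothetical XDP program that returns XDP_TX.
 * Before this series the FEC driver routed XDP_TX through its
 * invalid-action path (see the fec_enet_run_xdp hunk below); with
 * this series the frame is retransmitted out the receiving port.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_reflect(struct xdp_md *ctx)
{
	/* Send every received frame back out the receiving port. */
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";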

File tree: 2 files changed (+132, -61 lines)

drivers/net/ethernet/freescale/fec.h (2 additions, 4 deletions)

@@ -548,13 +548,11 @@ enum {
 enum fec_txbuf_type {
 	FEC_TXBUF_T_SKB,
 	FEC_TXBUF_T_XDP_NDO,
+	FEC_TXBUF_T_XDP_TX,
 };
 
 struct fec_tx_buffer {
-	union {
-		struct sk_buff *skb;
-		struct xdp_frame *xdp;
-	};
+	void *buf_p;
 	enum fec_txbuf_type type;
 };
 
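The typed union becomes a single void *buf_p discriminated by the existing type field, since a tx ring slot may now hold a struct sk_buff, an xdp_frame, or, for XDP_TX, a bare page_pool page. A sketch (using only names from this diff) of how the cleanup paths in fec_main.c below interpret buf_p per type:

	struct page *page;

	switch (txq->tx_buf[i].type) {
	case FEC_TXBUF_T_SKB:		/* network stack: struct sk_buff * */
		dev_kfree_skb_any(txq->tx_buf[i].buf_p);
		break;
	case FEC_TXBUF_T_XDP_NDO:	/* ndo_xdp_xmit: struct xdp_frame * */
		xdp_return_frame(txq->tx_buf[i].buf_p);
		break;
	case FEC_TXBUF_T_XDP_TX:	/* XDP_TX: struct page * from the RX page_pool */
		page = txq->tx_buf[i].buf_p;
		page_pool_put_page(page->pp, page, 0, false);
		break;
	}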
drivers/net/ethernet/freescale/fec_main.c (130 additions, 57 deletions)

@@ -69,13 +69,17 @@
 #include <soc/imx/cpuidle.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 #include <asm/cacheflush.h>
 
 #include "fec.h"
 
 static void set_multicast_list(struct net_device *ndev);
 static void fec_enet_itr_coal_set(struct net_device *ndev);
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+				int cpu, struct xdp_buff *xdp,
+				u32 dma_sync_len);
 
 #define DRIVER_NAME	"fec"
 
@@ -396,7 +400,7 @@ static void fec_dump(struct net_device *ndev)
 			fec16_to_cpu(bdp->cbd_sc),
 			fec32_to_cpu(bdp->cbd_bufaddr),
 			fec16_to_cpu(bdp->cbd_datlen),
-			txq->tx_buf[index].skb);
+			txq->tx_buf[index].buf_p);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		index++;
 	} while (bdp != txq->bd.base);
@@ -653,7 +657,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 	/* Save skb pointer */
-	txq->tx_buf[index].skb = skb;
+	txq->tx_buf[index].buf_p = skb;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -859,7 +863,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Save skb pointer */
-	txq->tx_buf[index].skb = skb;
+	txq->tx_buf[index].buf_p = skb;
 
 	skb_tx_timestamp(skb);
 	txq->bd.cur = bdp;
@@ -956,26 +960,27 @@ static void fec_enet_bd_init(struct net_device *dev)
 						 fec32_to_cpu(bdp->cbd_bufaddr),
 						 fec16_to_cpu(bdp->cbd_datlen),
 						 DMA_TO_DEVICE);
-			if (txq->tx_buf[i].skb) {
-				dev_kfree_skb_any(txq->tx_buf[i].skb);
-				txq->tx_buf[i].skb = NULL;
-			}
-		} else {
+			if (txq->tx_buf[i].buf_p)
+				dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+		} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
 			if (bdp->cbd_bufaddr)
 				dma_unmap_single(&fep->pdev->dev,
 						 fec32_to_cpu(bdp->cbd_bufaddr),
 						 fec16_to_cpu(bdp->cbd_datlen),
 						 DMA_TO_DEVICE);
 
-			if (txq->tx_buf[i].xdp) {
-				xdp_return_frame(txq->tx_buf[i].xdp);
-				txq->tx_buf[i].xdp = NULL;
-			}
+			if (txq->tx_buf[i].buf_p)
+				xdp_return_frame(txq->tx_buf[i].buf_p);
+		} else {
+			struct page *page = txq->tx_buf[i].buf_p;
 
-			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
-			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+			if (page)
+				page_pool_put_page(page->pp, page, 0, false);
 		}
 
+		txq->tx_buf[i].buf_p = NULL;
+		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+		txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 		bdp->cbd_bufaddr = cpu_to_fec32(0);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 	}
@@ -1382,6 +1387,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 	struct netdev_queue *nq;
 	int index = 0;
 	int entries_free;
+	struct page *page;
+	int frame_len;
 
 	fep = netdev_priv(ndev);
 
@@ -1403,8 +1410,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 
 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
-			skb = txq->tx_buf[index].skb;
-			txq->tx_buf[index].skb = NULL;
+			skb = txq->tx_buf[index].buf_p;
 			if (bdp->cbd_bufaddr &&
 			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
 				dma_unmap_single(&fep->pdev->dev,
@@ -1423,17 +1429,24 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			if (unlikely(!budget))
 				break;
 
-			xdpf = txq->tx_buf[index].xdp;
-			if (bdp->cbd_bufaddr)
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+				xdpf = txq->tx_buf[index].buf_p;
+				if (bdp->cbd_bufaddr)
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+			} else {
+				page = txq->tx_buf[index].buf_p;
+			}
+
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
-			if (!xdpf) {
+			if (unlikely(!txq->tx_buf[index].buf_p)) {
 				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
 				goto tx_buf_done;
 			}
+
+			frame_len = fec16_to_cpu(bdp->cbd_datlen);
 		}
 
 		/* Check for errors. */
@@ -1457,7 +1470,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
 				ndev->stats.tx_bytes += skb->len;
 			else
-				ndev->stats.tx_bytes += xdpf->len;
+				ndev->stats.tx_bytes += frame_len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1482,14 +1495,17 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 
 			/* Free the sk buffer associated with this last transmit */
 			dev_kfree_skb_any(skb);
-		} else {
-			xdp_return_frame(xdpf);
-
-			txq->tx_buf[index].xdp = NULL;
-			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
-			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+			xdp_return_frame_rx_napi(xdpf);
+		} else { /* recycle pages of XDP_TX frames */
+			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
+			page_pool_put_page(page->pp, page, 0, true);
 		}
 
+		txq->tx_buf[index].buf_p = NULL;
+		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+
 tx_buf_done:
 		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
@@ -1542,7 +1558,7 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 
 static u32
 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
-		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
 {
 	unsigned int sync, len = xdp->data_end - xdp->data;
 	u32 ret = FEC_ENET_XDP_PASS;
@@ -1552,8 +1568,10 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
 
 	act = bpf_prog_run_xdp(prog, xdp);
 
-	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
-	sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+	/* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
+	 * max len CPU touch
+	 */
+	sync = xdp->data_end - xdp->data;
 	sync = max(sync, len);
 
 	switch (act) {
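Aside: a worked example of the new sync computation, assuming FEC_ENET_XDP_HEADROOM is 256 bytes and a 100-byte frame was received:

	len = xdp->data_end - xdp->data;   /* 100, captured before the program runs */
	act = bpf_prog_run_xdp(prog, xdp);
	/* Suppose the program called bpf_xdp_adjust_head(xdp, -14) to prepend
	 * a 14-byte header, moving xdp->data back into the headroom.
	 */
	sync = xdp->data_end - xdp->data;  /* now 114 */
	sync = max(sync, len);             /* max(114, 100) = 114 */

The for_device sync length thus covers every byte the CPU may have touched, while the old formula, anchored at data_hard_start + FEC_ENET_XDP_HEADROOM, would still yield 100 here even though 114 bytes now need syncing from xdp->data.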
@@ -1574,11 +1592,19 @@
 		}
 		break;
 
-	default:
-		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
-		fallthrough;
-
 	case XDP_TX:
+		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
+		if (unlikely(err)) {
+			ret = FEC_ENET_XDP_CONSUMED;
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(rxq->page_pool, page, sync, true);
+			trace_xdp_exception(fep->netdev, prog, act);
+		} else {
+			ret = FEC_ENET_XDP_TX;
+		}
+		break;
+
+	default:
 		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
 		fallthrough;
 
@@ -1620,6 +1646,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
 	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
 	u32 data_start = FEC_ENET_XDP_HEADROOM;
+	int cpu = smp_processor_id();
 	struct xdp_buff xdp;
 	struct page *page;
 	u32 sub_len = 4;
@@ -1698,7 +1725,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 			/* subtract 16bit shift and FCS */
 			xdp_prepare_buff(&xdp, page_address(page),
 					 data_start, pkt_len - sub_len, false);
-			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
 			xdp_result |= ret;
 			if (ret != FEC_ENET_XDP_PASS)
 				goto rx_processing_done;
@@ -3208,7 +3235,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	unsigned int i;
-	struct sk_buff *skb;
 	struct fec_enet_priv_tx_q *txq;
 	struct fec_enet_priv_rx_q *rxq;
 	unsigned int q;
@@ -3233,18 +3259,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
 
+			if (!txq->tx_buf[i].buf_p) {
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+				continue;
+			}
+
 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
-				skb = txq->tx_buf[i].skb;
-				txq->tx_buf[i].skb = NULL;
-				dev_kfree_skb(skb);
+				dev_kfree_skb(txq->tx_buf[i].buf_p);
+			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+				xdp_return_frame(txq->tx_buf[i].buf_p);
 			} else {
-				if (txq->tx_buf[i].xdp) {
-					xdp_return_frame(txq->tx_buf[i].xdp);
-					txq->tx_buf[i].xdp = NULL;
-				}
+				struct page *page = txq->tx_buf[i].buf_p;
 
-				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+				page_pool_put_page(page->pp, page, 0, false);
 			}
+
+			txq->tx_buf[i].buf_p = NULL;
+			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 		}
 	}
 }
@@ -3767,12 +3798,14 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
 
 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 				   struct fec_enet_priv_tx_q *txq,
-				   struct xdp_frame *frame)
+				   void *frame, u32 dma_sync_len,
+				   bool ndo_xmit)
 {
 	unsigned int index, status, estatus;
 	struct bufdesc *bdp;
 	dma_addr_t dma_addr;
 	int entries_free;
+	u16 frame_len;
 
 	entries_free = fec_enet_get_free_txdesc_num(txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -3787,17 +3820,37 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 
 	index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-	dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
-				  frame->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&fep->pdev->dev, dma_addr))
-		return -ENOMEM;
+	if (ndo_xmit) {
+		struct xdp_frame *xdpf = frame;
+
+		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
+					  xdpf->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+			return -ENOMEM;
+
+		frame_len = xdpf->len;
+		txq->tx_buf[index].buf_p = xdpf;
+		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+	} else {
+		struct xdp_buff *xdpb = frame;
+		struct page *page;
+
+		page = virt_to_page(xdpb->data);
+		dma_addr = page_pool_get_dma_addr(page) +
+			   (xdpb->data - xdpb->data_hard_start);
+		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
+					   dma_sync_len, DMA_BIDIRECTIONAL);
+		frame_len = xdpb->data_end - xdpb->data;
+		txq->tx_buf[index].buf_p = page;
+		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
+	}
 
 	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
 	if (fep->bufdesc_ex)
 		estatus = BD_ENET_TX_INT;
 
 	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
-	bdp->cbd_datlen = cpu_to_fec16(frame->len);
+	bdp->cbd_datlen = cpu_to_fec16(frame_len);
 
 	if (fep->bufdesc_ex) {
 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
@@ -3809,9 +3862,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
-	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
-	txq->tx_buf[index].xdp = frame;
-
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
 	 */
@@ -3837,6 +3887,29 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	return 0;
 }
 
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+				int cpu, struct xdp_buff *xdp,
+				u32 dma_sync_len)
+{
+	struct fec_enet_priv_tx_q *txq;
+	struct netdev_queue *nq;
+	int queue, ret;
+
+	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+	txq = fep->tx_queue[queue];
+	nq = netdev_get_tx_queue(fep->netdev, queue);
+
+	__netif_tx_lock(nq, cpu);
+
+	/* Avoid tx timeout as XDP shares the queue with kernel stack */
+	txq_trans_cond_update(nq);
+	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
+
+	__netif_tx_unlock(nq);
+
+	return ret;
+}
+
 static int fec_enet_xdp_xmit(struct net_device *dev,
 			     int num_frames,
 			     struct xdp_frame **frames,
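The two transmit entry points now share fec_enet_txq_xmit_frame() but differ in DMA handling, visible at their call sites:

	/* ndo_xdp_xmit (fec_enet_xdp_xmit below): the xdp_frame can come from
	 * anywhere, so its data is freshly mapped with dma_map_single()
	 * (dma_sync_len = 0, ndo_xmit = true):
	 */
	fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true);

	/* XDP_TX (fec_enet_xdp_tx_xmit above): the buffer is already a
	 * page_pool page mapped for the RX ring, so only the bytes the CPU
	 * touched are synced for the device (ndo_xmit = false):
	 */
	fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);

This is what lets the XDP_TX path skip xdp_convert_buff_to_frame() and a full map/unmap cycle per frame.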
@@ -3859,7 +3932,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 	/* Avoid tx timeout as XDP shares the queue with kernel stack */
 	txq_trans_cond_update(nq);
 	for (i = 0; i < num_frames; i++) {
-		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
+		if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
 			break;
 		sent_frames++;
 	}
