
Commit da43f0a

Merge branch 'mvneta-access-skb_shared_info-only-on-last-frag'

Lorenzo Bianconi says:

====================
mvneta: access skb_shared_info only on last frag

Build the skb_shared_info on the mvneta_rx_swbm() stack and sync it to
the xdp_buff skb_shared_info area only on the last fragment. Avoid
unnecessary xdp_buff initialization in the mvneta_rx_swbm() routine.

This is a preliminary series toward completing XDP multi-buff support
in the mvneta driver.
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents 9a71baf + 039fbc4 commit da43f0a
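
In short: rather than dirtying the skb_shared_info that lives at the tail of the xdp_buff's buffer once per received fragment, the driver now collects fragments in an skb_shared_info on the NAPI poll stack and copies the result into the buffer exactly once, when the last descriptor of the frame arrives. A minimal sketch of the pattern follows, assuming kernel context; rx_more_descs() and rx_is_last_frag(), along with the loop variables page, offset, len and xdp_buf, are hypothetical stand-ins for the driver's descriptor parsing, not code from this commit:

	/* Sketch only: accumulate frags on the stack, sync them into the
	 * buffer on the last fragment. Helper names are hypothetical.
	 */
	struct skb_shared_info local_sinfo;

	local_sinfo.nr_frags = 0;

	while (rx_more_descs()) {
		skb_frag_t *frag = &local_sinfo.frags[local_sinfo.nr_frags++];

		__skb_frag_set_page(frag, page);	/* cache-hot stack writes */
		skb_frag_off_set(frag, offset);
		skb_frag_size_set(frag, len);

		if (rx_is_last_frag()) {
			/* first and only touch of the real shared_info area */
			struct skb_shared_info *sinfo =
				xdp_get_shared_info_from_buff(&xdp_buf);

			sinfo->nr_frags = local_sinfo.nr_frags;
			memcpy(sinfo->frags, local_sinfo.frags,
			       sinfo->nr_frags * sizeof(skb_frag_t));
		}
	}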

File tree

1 file changed: +35 -20 lines changed

drivers/net/ethernet/marvell/mvneta.c

Lines changed: 35 additions & 20 deletions
@@ -2033,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 
 static void
 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
-		    struct xdp_buff *xdp, int sync_len, bool napi)
+		    struct xdp_buff *xdp, struct skb_shared_info *sinfo,
+		    int sync_len)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	int i;
 
 	for (i = 0; i < sinfo->nr_frags; i++)
 		page_pool_put_full_page(rxq->page_pool,
-					skb_frag_page(&sinfo->frags[i]), napi);
+					skb_frag_page(&sinfo->frags[i]), true);
 	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
-			   sync_len, napi);
+			   sync_len, true);
 }
 
 static int
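
The dropped bool napi argument was true at every call site (mvneta_xdp_put_buff() only runs from the NAPI poll loop), so it is now hard-coded and the freed parameter slot carries the caller's skb_shared_info instead. For context, the page_pool helper being called looks roughly like this, paraphrased from include/net/page_pool.h around the time of this commit (check the tree for the exact definition); allow_direct == true recycles the page into the pool's lock-free per-CPU cache, which is only safe from softirq/NAPI context:

	/* Paraphrase of the in-tree helper, not new code: -1 means
	 * "sync the whole page" before the pool reuses it for DMA.
	 */
	static inline void page_pool_put_full_page(struct page_pool *pool,
						   struct page *page,
						   bool allow_direct)
	{
		page_pool_put_page(pool, page, -1, allow_direct);
	}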
@@ -2179,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp,
 	       u32 frame_sz, struct mvneta_stats *stats)
 {
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	unsigned int len, data_len, sync;
 	u32 ret, act;
 
@@ -2199,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
 		err = xdp_do_redirect(pp->dev, xdp, prog);
 		if (unlikely(err)) {
-			mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
 			ret = MVNETA_XDP_DROPPED;
 		} else {
 			ret = MVNETA_XDP_REDIR;
@@ -2210,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	case XDP_TX:
 		ret = mvneta_xdp_xmit_back(pp, xdp);
 		if (ret != MVNETA_XDP_TX)
-			mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -2219,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		trace_xdp_exception(pp->dev, prog, act);
 		fallthrough;
 	case XDP_DROP:
-		mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+		mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
 		ret = MVNETA_XDP_DROPPED;
 		stats->xdp_drop++;
 		break;
@@ -2277,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc,
 			    struct mvneta_rx_queue *rxq,
 			    struct xdp_buff *xdp, int *size,
+			    struct skb_shared_info *xdp_sinfo,
 			    struct page *page)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
 	int data_len, len;
@@ -2297,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 				len, dma_dir);
 	rx_desc->buf_phys_addr = 0;
 
-	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
-		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
+		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
 
 		skb_frag_off_set(frag, pp->rx_offset_correction);
 		skb_frag_size_set(frag, data_len);
 		__skb_frag_set_page(frag, page);
-		sinfo->nr_frags++;
+
+		/* last fragment */
+		if (len == *size) {
+			struct skb_shared_info *sinfo;
+
+			sinfo = xdp_get_shared_info_from_buff(xdp);
+			sinfo->nr_frags = xdp_sinfo->nr_frags;
+			memcpy(sinfo->frags, xdp_sinfo->frags,
+			       sinfo->nr_frags * sizeof(skb_frag_t));
+		}
 	} else {
 		page_pool_put_full_page(rxq->page_pool, page, true);
 	}
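
This hunk is the heart of the series: fragments are staged in the caller-provided xdp_sinfo, and the shared_info inside the buffer is written exactly once, when len == *size marks the last fragment. The saving comes from where that shared_info lives. Paraphrasing the helper from include/net/xdp.h (macro names may differ slightly by kernel version; this is for orientation, not a definition from this commit):

	/* Paraphrased: the xdp_buff's skb_shared_info sits in the tailroom
	 * at the end of the buffer, far from the descriptors and headers
	 * the RX loop is already touching, so per-fragment writes to it
	 * cost an extra cache line each time.
	 */
	#define xdp_data_hard_end(xdp)					\
		((xdp)->data_hard_start + (xdp)->frame_sz -		\
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

	static inline struct skb_shared_info *
	xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
	{
		return (struct skb_shared_info *)xdp_data_hard_end(xdp);
	}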
@@ -2347,13 +2357,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 {
 	int rx_proc = 0, rx_todo, refill, size = 0;
 	struct net_device *dev = pp->dev;
-	struct xdp_buff xdp_buf = {
-		.frame_sz = PAGE_SIZE,
-		.rxq = &rxq->xdp_rxq,
-	};
+	struct skb_shared_info sinfo;
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
 	u32 desc_status, frame_sz;
+	struct xdp_buff xdp_buf;
+
+	xdp_buf.data_hard_start = NULL;
+	xdp_buf.frame_sz = PAGE_SIZE;
+	xdp_buf.rxq = &rxq->xdp_rxq;
+
+	sinfo.nr_frags = 0;
 
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
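
The xdp_buff change above is the "avoid unnecessary initialization" half of the series: the removed designated initializer zero-fills every unnamed member of the struct on each call, while assigning only the fields that must survive across packets leaves the rest alone (they are written per-packet before use anyway). A small stand-alone illustration of the difference, using a hypothetical struct rather than the kernel's xdp_buff so it compiles anywhere:

	/* Hypothetical stand-in for struct xdp_buff, just to show the cost
	 * difference between the two initialization styles.
	 */
	struct buf {
		void *data;
		void *data_end;
		void *data_hard_start;
		unsigned int frame_sz;
		void *rxq;
	};

	void demo(void *rxq)
	{
		/* Designated initializer: data and data_end get zeroed too,
		 * even though a driver would overwrite them before every use.
		 */
		struct buf a = {
			.frame_sz = 4096,
			.rxq = rxq,
		};

		/* Field-by-field: only the stores actually needed. */
		struct buf b;

		b.data_hard_start = NULL;
		b.frame_sz = 4096;
		b.rxq = rxq;

		(void)a;
		(void)b;
	}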
@@ -2393,19 +2407,19 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 				rx_desc->buf_phys_addr = 0;
 				page_pool_put_full_page(rxq->page_pool, page,
 							true);
-				continue;
+				goto next;
 			}
 
 			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
-						    &size, page);
+						    &size, &sinfo, page);
 		} /* Middle or Last descriptor */
 
 		if (!(rx_status & MVNETA_RXD_LAST_DESC))
 			/* no last descriptor this time */
 			continue;
 
 		if (size) {
-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 			goto next;
 		}
@@ -2417,7 +2431,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
 			u64_stats_update_begin(&stats->syncp);
 			stats->es.skb_alloc_error++;
@@ -2434,11 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		napi_gro_receive(napi, skb);
 next:
 		xdp_buf.data_hard_start = NULL;
+		sinfo.nr_frags = 0;
 	}
 	rcu_read_unlock();
 
 	if (xdp_buf.data_hard_start)
-		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
 
 	if (ps.xdp_redirect)
 		xdp_do_flush_map();
