Commit 389ab7f

netoptimizer authored and Alexei Starovoitov committed
xdp: introduce xdp_return_frame_rx_napi
When sending an xdp_frame through an xdp_do_redirect call, error cases can happen where the xdp_frame needs to be dropped, and returning an -errno code isn't sufficient/possible any longer (e.g. for the cpumap case). This is already fully supported by simply calling xdp_return_frame.

This patch is an optimization: it provides xdp_return_frame_rx_napi, a faster variant for these error cases. It takes advantage of the protection provided by XDP RX running under NAPI. This change is mostly relevant for drivers using the page_pool allocator, as they can take advantage of this. (Tested with mlx5.)

Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
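For orientation only, a rough sketch of the intended call pattern (not part of this commit; the example_* names are made up). A caller that knows it is executing inside the XDP RX NAPI poll loop can drop frames with the new helper, while any other context keeps using xdp_return_frame():

/* Hypothetical illustration -- not from this commit. */
#include <net/xdp.h>

/* Runs from the driver's NAPI poll loop, e.g. while flushing a
 * redirect bulk queue; NAPI protection is held, so the faster
 * direct-recycle variant is safe.
 */
static void example_drop_in_napi(struct xdp_frame *xdpf)
{
	xdp_return_frame_rx_napi(xdpf);
}

/* Runs from some other context (e.g. a workqueue); no NAPI
 * protection, so the regular, always-safe variant is used.
 */
static void example_drop_elsewhere(struct xdp_frame *xdpf)
{
	xdp_return_frame(xdpf);
}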
1 parent 9940fbf commit 389ab7f

File tree: 5 files changed, +22 -8 lines


include/net/page_pool.h

Lines changed: 3 additions & 2 deletions
@@ -115,13 +115,14 @@ void page_pool_destroy(struct page_pool *pool);
 void __page_pool_put_page(struct page_pool *pool,
                           struct page *page, bool allow_direct);
 
-static inline void page_pool_put_page(struct page_pool *pool, struct page *page)
+static inline void page_pool_put_page(struct page_pool *pool,
+                                      struct page *page, bool allow_direct)
 {
         /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
          * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
          */
 #ifdef CONFIG_PAGE_POOL
-        __page_pool_put_page(pool, page, false);
+        __page_pool_put_page(pool, page, allow_direct);
 #endif
 }
 /* Very limited use-cases allow recycle direct */
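As a side note (not part of the diff; the helper name below is illustrative), the new allow_direct parameter lets the caller state whether it is currently running under the RX NAPI/softirq protection described in the commit message, which is the only case where the faster direct recycle is safe:

/* Hypothetical sketch of a caller after this signature change. */
#include <net/page_pool.h>

static void example_put_page(struct page_pool *pool, struct page *page,
                             bool in_rx_napi)
{
        /* Pass true only while running under the RX NAPI protection
         * that owns this pool; false takes the always-safe path.
         */
        page_pool_put_page(pool, page, in_rx_napi);
}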

include/net/xdp.h

Lines changed: 1 addition & 0 deletions
@@ -104,6 +104,7 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
 }
 
 void xdp_return_frame(struct xdp_frame *xdpf);
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
 void xdp_return_buff(struct xdp_buff *xdp);
 
 int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,

kernel/bpf/cpumap.c

Lines changed: 1 addition & 1 deletion
@@ -578,7 +578,7 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
                 err = __ptr_ring_produce(q, xdpf);
                 if (err) {
                         drops++;
-                        xdp_return_frame(xdpf);
+                        xdp_return_frame_rx_napi(xdpf);
                 }
                 processed++;
         }

kernel/bpf/devmap.c

Lines changed: 1 addition & 1 deletion
@@ -239,7 +239,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
                 err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
                 if (err) {
                         drops++;
-                        xdp_return_frame(xdpf);
+                        xdp_return_frame_rx_napi(xdpf);
                 } else {
                         sent++;
                 }

net/core/xdp.c

Lines changed: 16 additions & 4 deletions
@@ -308,7 +308,13 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
 }
 EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
 
-static void xdp_return(void *data, struct xdp_mem_info *mem)
+/* XDP RX runs under NAPI protection, and in different delivery error
+ * scenarios (e.g. queue full), it is possible to return the xdp_frame
+ * while still leveraging this protection. The @napi_direct boolian
+ * is used for those calls sites. Thus, allowing for faster recycling
+ * of xdp_frames/pages in those cases.
+ */
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 {
         struct xdp_mem_allocator *xa;
         struct page *page;
@@ -320,7 +326,7 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
                 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                 page = virt_to_head_page(data);
                 if (xa)
-                        page_pool_put_page(xa->page_pool, page);
+                        page_pool_put_page(xa->page_pool, page, napi_direct);
                 else
                         put_page(page);
                 rcu_read_unlock();
@@ -340,12 +346,18 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-        xdp_return(xdpf->data, &xdpf->mem);
+        __xdp_return(xdpf->data, &xdpf->mem, false);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
+{
+        __xdp_return(xdpf->data, &xdpf->mem, true);
+}
+EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
+
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-        xdp_return(xdp->data, &xdp->rxq->mem);
+        __xdp_return(xdp->data, &xdp->rxq->mem, true);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
