Skip to content

Commit b9eef33

Browse files
Sebastian Andrzej Siewior authored and Paolo Abeni committed
xdp: Use nested-BH locking for system_page_pool
system_page_pool is a per-CPU variable and relies on disabled BH for its locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT this data structure requires explicit locking. Make a struct with a page_pool member (original system_page_pool) and a local_lock_t and use local_lock_nested_bh() for locking. This change adds only lockdep coverage and does not alter the functional behaviour for !PREEMPT_RT. Cc: Andrew Lunn <[email protected]> Cc: Alexei Starovoitov <[email protected]> Cc: Daniel Borkmann <[email protected]> Cc: Jesper Dangaard Brouer <[email protected]> Cc: John Fastabend <[email protected]> Reviewed-by: Toke Høiland-Jørgensen <[email protected]> Signed-off-by: Sebastian Andrzej Siewior <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Paolo Abeni <[email protected]>
1 parent bc57eda commit b9eef33

File tree

3 files changed

+26
-11
lines changed

3 files changed

+26
-11
lines changed

include/linux/netdevice.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3503,7 +3503,12 @@ struct softnet_data {
35033503
};
35043504

35053505
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3506-
DECLARE_PER_CPU(struct page_pool *, system_page_pool);
3506+
3507+
struct page_pool_bh {
3508+
struct page_pool *pool;
3509+
local_lock_t bh_lock;
3510+
};
3511+
DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
35073512

35083513
#ifndef CONFIG_PREEMPT_RT
35093514
static inline int dev_recursion_level(void)

net/core/dev.c

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -462,7 +462,9 @@ EXPORT_PER_CPU_SYMBOL(softnet_data);
462462
* PP consumers must pay attention to run APIs in the appropriate context
463463
* (e.g. NAPI context).
464464
*/
465-
DEFINE_PER_CPU(struct page_pool *, system_page_pool);
465+
DEFINE_PER_CPU(struct page_pool_bh, system_page_pool) = {
466+
.bh_lock = INIT_LOCAL_LOCK(bh_lock),
467+
};
466468

467469
#ifdef CONFIG_LOCKDEP
468470
/*
@@ -5322,7 +5324,10 @@ netif_skb_check_for_xdp(struct sk_buff **pskb, const struct bpf_prog *prog)
53225324
struct sk_buff *skb = *pskb;
53235325
int err, hroom, troom;
53245326

5325-
if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
5327+
local_lock_nested_bh(&system_page_pool.bh_lock);
5328+
err = skb_cow_data_for_xdp(this_cpu_read(system_page_pool.pool), pskb, prog);
5329+
local_unlock_nested_bh(&system_page_pool.bh_lock);
5330+
if (!err)
53265331
return 0;
53275332

53285333
/* In case we have to go down the path and also linearize,
@@ -12712,7 +12717,7 @@ static int net_page_pool_create(int cpuid)
1271212717
return err;
1271312718
}
1271412719

12715-
per_cpu(system_page_pool, cpuid) = pp_ptr;
12720+
per_cpu(system_page_pool.pool, cpuid) = pp_ptr;
1271612721
#endif
1271712722
return 0;
1271812723
}
@@ -12842,13 +12847,13 @@ static int __init net_dev_init(void)
1284212847
for_each_possible_cpu(i) {
1284312848
struct page_pool *pp_ptr;
1284412849

12845-
pp_ptr = per_cpu(system_page_pool, i);
12850+
pp_ptr = per_cpu(system_page_pool.pool, i);
1284612851
if (!pp_ptr)
1284712852
continue;
1284812853

1284912854
xdp_unreg_page_pool(pp_ptr);
1285012855
page_pool_destroy(pp_ptr);
12851-
per_cpu(system_page_pool, i) = NULL;
12856+
per_cpu(system_page_pool.pool, i) = NULL;
1285212857
}
1285312858
}
1285412859

net/core/xdp.c

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -739,25 +739,27 @@ static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
739739
*/
740740
struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
741741
{
742-
struct page_pool *pp = this_cpu_read(system_page_pool);
743742
const struct xdp_rxq_info *rxq = xdp->rxq;
744743
u32 len = xdp->data_end - xdp->data_meta;
745744
u32 truesize = xdp->frame_sz;
746-
struct sk_buff *skb;
745+
struct sk_buff *skb = NULL;
746+
struct page_pool *pp;
747747
int metalen;
748748
void *data;
749749

750750
if (!IS_ENABLED(CONFIG_PAGE_POOL))
751751
return NULL;
752752

753+
local_lock_nested_bh(&system_page_pool.bh_lock);
754+
pp = this_cpu_read(system_page_pool.pool);
753755
data = page_pool_dev_alloc_va(pp, &truesize);
754756
if (unlikely(!data))
755-
return NULL;
757+
goto out;
756758

757759
skb = napi_build_skb(data, truesize);
758760
if (unlikely(!skb)) {
759761
page_pool_free_va(pp, data, true);
760-
return NULL;
762+
goto out;
761763
}
762764

763765
skb_mark_for_recycle(skb);
@@ -776,13 +778,16 @@ struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
776778
if (unlikely(xdp_buff_has_frags(xdp)) &&
777779
unlikely(!xdp_copy_frags_from_zc(skb, xdp, pp))) {
778780
napi_consume_skb(skb, true);
779-
return NULL;
781+
skb = NULL;
782+
goto out;
780783
}
781784

782785
xsk_buff_free(xdp);
783786

784787
skb->protocol = eth_type_trans(skb, rxq->dev);
785788

789+
out:
790+
local_unlock_nested_bh(&system_page_pool.bh_lock);
786791
return skb;
787792
}
788793
EXPORT_SYMBOL_GPL(xdp_build_skb_from_zc);

0 commit comments

Comments (0)