Commit 22bf877

mfijalko authored and anguy11 committed
ice: introduce XDP_TX fallback path
Under rare circumstances there might be a situation in which the requirement of having an XDP Tx queue per CPU cannot be fulfilled and some of the Tx resources have to be shared between CPUs. This yields a need to place accesses to xdp_ring inside a critical section protected by a spinlock. These accesses happen in the hot path, so introduce a static branch that is enabled from the control plane when the driver cannot provide a Tx queue dedicated to XDP on each CPU.

Currently, the chosen design is to allow any number of XDP Tx queues that is at least half of the platform's CPU count. For a lower number, the driver bails out and reports to the user that there were not enough Tx resources to configure XDP. Ring sharing is signalled via static branch enablement, which in turn indicates that the lock for xdp_ring accesses needs to be taken in the hot path. The static-branch-based approach has no performance impact on the non-fallback path.

One thing worth mentioning is that the static branch acts as a global driver switch: if one PF runs out of Tx resources, the other PFs serviced by the ice driver will suffer as well. However, given that the HW handled by the ice driver provides 1024 Tx queues per PF, this is currently an unlikely scenario.

Signed-off-by: Maciej Fijalkowski <[email protected]>
Tested-by: George Kuruvinakunnel <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
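As a rough illustration of the scheme described above, here is a minimal sketch of a static-key-gated spinlock around a shared Tx ring. The names demo_locking_key, struct demo_ring, demo_xmit() and demo_rings_allocated() are hypothetical stand-ins; the real equivalents (ice_xdp_locking_key, struct ice_tx_ring, ice_xmit_xdp_ring()) appear in the diff below.

#include <linux/jump_label.h>
#include <linux/spinlock.h>

/* Disabled by default, so the one-ring-per-CPU fast path pays only for a
 * patched-out branch: no lock, no atomic operation. */
static DEFINE_STATIC_KEY_FALSE(demo_locking_key);

struct demo_ring {
        spinlock_t tx_lock;     /* taken only when rings are shared between CPUs;
                                 * spin_lock_init() is assumed to run at ring setup */
        /* descriptors, tail pointer, ... */
};

/* Hypothetical transmit helper standing in for ice_xmit_xdp_ring(). */
static void demo_xmit(struct demo_ring *ring, void *data, unsigned int len)
{
}

/* Control plane: enable the locking path when fewer rings than CPUs exist. */
static void demo_rings_allocated(unsigned int rings, unsigned int cpus)
{
        if (rings < cpus)
                static_branch_inc(&demo_locking_key);
}

/* Hot path: the lock is taken only when the static key has been enabled. */
static void demo_tx(struct demo_ring *ring, void *data, unsigned int len)
{
        if (static_branch_unlikely(&demo_locking_key))
                spin_lock(&ring->tx_lock);

        demo_xmit(ring, data, len);

        if (static_branch_unlikely(&demo_locking_key))
                spin_unlock(&ring->tx_lock);
}

Note that the driver below enables the key with static_branch_inc() and drops it with static_branch_dec() in ice_destroy_xdp_rings(), so the key effectively acts as a reference count across VSIs rather than a simple on/off flag.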
1 parent 9610bd9 commit 22bf877

File tree

6 files changed: +75 -9 lines changed

drivers/net/ethernet/intel/ice/ice.h

Lines changed: 3 additions & 0 deletions
@@ -167,6 +167,8 @@ enum ice_feature {
         ICE_F_MAX
 };
 
+DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+
 struct ice_txq_meta {
         u32 q_teid;     /* Tx-scheduler element identifier */
         u16 q_id;       /* Entry in VSI's txq_map bitmap */
@@ -716,6 +718,7 @@ int ice_up(struct ice_vsi *vsi);
 int ice_down(struct ice_vsi *vsi);
 int ice_vsi_cfg(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
 int ice_destroy_xdp_rings(struct ice_vsi *vsi);
 int

drivers/net/ethernet/intel/ice/ice_lib.c

Lines changed: 3 additions & 1 deletion
@@ -3215,7 +3215,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 
                 ice_vsi_map_rings_to_vectors(vsi);
                 if (ice_is_xdp_ena_vsi(vsi)) {
-                        vsi->num_xdp_txq = num_possible_cpus();
+                        ret = ice_vsi_determine_xdp_res(vsi);
+                        if (ret)
+                                goto err_vectors;
                         ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
                         if (ret)
                                 goto err_vectors;

drivers/net/ethernet/intel/ice/ice_main.c

Lines changed: 47 additions & 6 deletions
@@ -44,6 +44,8 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
 #endif /* !CONFIG_DYNAMIC_DEBUG */
 
 static DEFINE_IDA(ice_aux_ida);
+DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+EXPORT_SYMBOL(ice_xdp_locking_key);
 
 static struct workqueue_struct *ice_wq;
 static const struct net_device_ops ice_netdev_safe_mode_ops;
@@ -2397,14 +2399,19 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
                         goto free_xdp_rings;
                 ice_set_ring_xdp(xdp_ring);
                 xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+                spin_lock_init(&xdp_ring->tx_lock);
                 for (j = 0; j < xdp_ring->count; j++) {
                         tx_desc = ICE_TX_DESC(xdp_ring, j);
                         tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
                 }
         }
 
-        ice_for_each_rxq(vsi, i)
-                vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
+        ice_for_each_rxq(vsi, i) {
+                if (static_key_enabled(&ice_xdp_locking_key))
+                        vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+                else
+                        vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
+        }
 
         return 0;
 
@@ -2469,6 +2476,10 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
         if (__ice_vsi_get_qs(&xdp_qs_cfg))
                 goto err_map_xdp;
 
+        if (static_key_enabled(&ice_xdp_locking_key))
+                netdev_warn(vsi->netdev,
+                            "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+
         if (ice_xdp_alloc_setup_rings(vsi))
                 goto clear_xdp_rings;
 
@@ -2585,6 +2596,9 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
         devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
         vsi->xdp_rings = NULL;
 
+        if (static_key_enabled(&ice_xdp_locking_key))
+                static_branch_dec(&ice_xdp_locking_key);
+
         if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
                 return 0;
 
@@ -2619,6 +2633,29 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
         }
 }
 
+/**
+ * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
+ * @vsi: VSI to determine the count of XDP Tx qs
+ *
+ * returns 0 if Tx qs count is higher than at least half of CPU count,
+ * -ENOMEM otherwise
+ */
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
+{
+        u16 avail = ice_get_avail_txq_count(vsi->back);
+        u16 cpus = num_possible_cpus();
+
+        if (avail < cpus / 2)
+                return -ENOMEM;
+
+        vsi->num_xdp_txq = min_t(u16, avail, cpus);
+
+        if (vsi->num_xdp_txq < cpus)
+                static_branch_inc(&ice_xdp_locking_key);
+
+        return 0;
+}
+
 /**
  * ice_xdp_setup_prog - Add or remove XDP eBPF program
  * @vsi: VSI to setup XDP for
@@ -2648,10 +2685,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
         }
 
         if (!ice_is_xdp_ena_vsi(vsi) && prog) {
-                vsi->num_xdp_txq = num_possible_cpus();
-                xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
-                if (xdp_ring_err)
-                        NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+                xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
+                if (xdp_ring_err) {
+                        NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+                } else {
+                        xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+                        if (xdp_ring_err)
+                                NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+                }
         } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
                 xdp_ring_err = ice_destroy_xdp_rings(vsi);
                 if (xdp_ring_err)

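To make the policy in ice_vsi_determine_xdp_res() above more concrete, here is a small stand-alone sketch of the same arithmetic with hypothetical numbers (plain user-space C for illustration; the real function uses ice_get_avail_txq_count() and num_possible_cpus(), as shown in the hunk above).

/* Worked example of the XDP Tx resource policy: allow XDP when at least
 * half as many free Tx queues as CPUs exist, and flag ring sharing
 * (i.e. the locking static key) when there are fewer queues than CPUs. */
#include <stdio.h>

static int determine_xdp_res(unsigned int avail, unsigned int cpus,
                             unsigned int *num_xdp_txq, int *needs_lock)
{
        if (avail < cpus / 2)
                return -1;                              /* driver returns -ENOMEM */

        *num_xdp_txq = avail < cpus ? avail : cpus;     /* min_t(u16, avail, cpus) */
        *needs_lock = *num_xdp_txq < cpus;              /* would enable ice_xdp_locking_key */
        return 0;
}

int main(void)
{
        unsigned int txq;
        int lock;

        /* e.g. 64 CPUs but only 40 free Tx queues: XDP is allowed,
         * 40 rings are shared and the locking static key gets enabled. */
        if (!determine_xdp_res(40, 64, &txq, &lock))
                printf("num_xdp_txq=%u locked=%d\n", txq, lock);

        /* e.g. 64 CPUs and only 20 free Tx queues: below cpus/2, setup fails. */
        if (determine_xdp_res(20, 64, &txq, &lock))
                printf("not enough Tx resources for XDP\n");

        return 0;
}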
drivers/net/ethernet/intel/ice/ice_txrx.c

Lines changed: 15 additions & 1 deletion
@@ -547,7 +547,11 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
         case XDP_PASS:
                 return ICE_XDP_PASS;
         case XDP_TX:
+                if (static_branch_unlikely(&ice_xdp_locking_key))
+                        spin_lock(&xdp_ring->tx_lock);
                 err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+                if (static_branch_unlikely(&ice_xdp_locking_key))
+                        spin_unlock(&xdp_ring->tx_lock);
                 if (err == ICE_XDP_CONSUMED)
                         goto out_failure;
                 return err;
@@ -599,7 +603,14 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                 return -EINVAL;
 
-        xdp_ring = vsi->xdp_rings[queue_index];
+        if (static_branch_unlikely(&ice_xdp_locking_key)) {
+                queue_index %= vsi->num_xdp_txq;
+                xdp_ring = vsi->xdp_rings[queue_index];
+                spin_lock(&xdp_ring->tx_lock);
+        } else {
+                xdp_ring = vsi->xdp_rings[queue_index];
+        }
+
         for (i = 0; i < n; i++) {
                 struct xdp_frame *xdpf = frames[i];
                 int err;
@@ -613,6 +624,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
         if (unlikely(flags & XDP_XMIT_FLUSH))
                 ice_xdp_ring_update_tail(xdp_ring);
 
+        if (static_branch_unlikely(&ice_xdp_locking_key))
+                spin_unlock(&xdp_ring->tx_lock);
+
         return nxmit;
 }
 
drivers/net/ethernet/intel/ice/ice_txrx.h

Lines changed: 1 addition & 0 deletions
@@ -329,6 +329,7 @@ struct ice_tx_ring {
         struct rcu_head rcu;            /* to avoid race on free */
         DECLARE_BITMAP(xps_state, ICE_TX_NBITS);        /* XPS Config State */
         struct ice_ptp_tx *tx_tstamps;
+        spinlock_t tx_lock;
         u32 txq_teid;                   /* Added Tx queue TEID */
 #define ICE_TX_FLAGS_RING_XDP   BIT(0)
         u8 flags;

drivers/net/ethernet/intel/ice/ice_txrx_lib.c

Lines changed: 6 additions & 1 deletion
@@ -350,6 +350,11 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
         if (xdp_res & ICE_XDP_REDIR)
                 xdp_do_flush_map();
 
-        if (xdp_res & ICE_XDP_TX)
+        if (xdp_res & ICE_XDP_TX) {
+                if (static_branch_unlikely(&ice_xdp_locking_key))
+                        spin_lock(&xdp_ring->tx_lock);
                 ice_xdp_ring_update_tail(xdp_ring);
+                if (static_branch_unlikely(&ice_xdp_locking_key))
+                        spin_unlock(&xdp_ring->tx_lock);
+        }
 }
