Skip to content

Commit 0153f36

Browse files
michalQb authored and anguy11 committed
ice: fix Tx scheduler error handling in XDP callback
When the XDP program is loaded, the XDP callback adds new Tx queues. This means that the callback must update the Tx scheduler with the new queue number. In the event of a Tx scheduler failure, the XDP callback should also fail and roll back any changes previously made for XDP preparation. The previous implementation had a bug that not all changes made by the XDP callback were rolled back. This caused the crash with the following call trace: [ +9.549584] ice 0000:ca:00.0: Failed VSI LAN queue config for XDP, error: -5 [ +0.382335] Oops: general protection fault, probably for non-canonical address 0x50a2250a90495525: 0000 [#1] SMP NOPTI [ +0.010710] CPU: 103 UID: 0 PID: 0 Comm: swapper/103 Not tainted 6.14.0-net-next-mar-31+ #14 PREEMPT(voluntary) [ +0.010175] Hardware name: Intel Corporation M50CYP2SBSTD/M50CYP2SBSTD, BIOS SE5C620.86B.01.01.0005.2202160810 02/16/2022 [ +0.010946] RIP: 0010:__ice_update_sample+0x39/0xe0 [ice] [...] [ +0.002715] Call Trace: [ +0.002452] <IRQ> [ +0.002021] ? __die_body.cold+0x19/0x29 [ +0.003922] ? die_addr+0x3c/0x60 [ +0.003319] ? exc_general_protection+0x17c/0x400 [ +0.004707] ? asm_exc_general_protection+0x26/0x30 [ +0.004879] ? __ice_update_sample+0x39/0xe0 [ice] [ +0.004835] ice_napi_poll+0x665/0x680 [ice] [ +0.004320] __napi_poll+0x28/0x190 [ +0.003500] net_rx_action+0x198/0x360 [ +0.003752] ? update_rq_clock+0x39/0x220 [ +0.004013] handle_softirqs+0xf1/0x340 [ +0.003840] ? sched_clock_cpu+0xf/0x1f0 [ +0.003925] __irq_exit_rcu+0xc2/0xe0 [ +0.003665] common_interrupt+0x85/0xa0 [ +0.003839] </IRQ> [ +0.002098] <TASK> [ +0.002106] asm_common_interrupt+0x26/0x40 [ +0.004184] RIP: 0010:cpuidle_enter_state+0xd3/0x690 Fix this by performing the missing unmapping of XDP queues from q_vectors and setting the XDP rings pointer back to NULL after all those queues are released. Also, add an immediate exit from the XDP callback in case of ring preparation failure. 
Fixes: efc2214 ("ice: Add support for XDP") Reviewed-by: Dawid Osuchowski <[email protected]> Reviewed-by: Przemek Kitszel <[email protected]> Reviewed-by: Jacob Keller <[email protected]> Signed-off-by: Michal Kubiak <[email protected]> Reviewed-by: Aleksandr Loktionov <[email protected]> Reviewed-by: Simon Horman <[email protected]> Tested-by: Jesse Brandeburg <[email protected]> Tested-by: Saritha Sanigani <[email protected]> (A Contingent Worker at Intel) Signed-off-by: Tony Nguyen <[email protected]>
1 parent d3faab9 commit 0153f36

File tree

1 file changed

+33
-14
lines changed

1 file changed

+33
-14
lines changed

drivers/net/ethernet/intel/ice/ice_main.c

Lines changed: 33 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2740,6 +2740,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
27402740
}
27412741
}
27422742

2743+
/**
2744+
* ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
2745+
* @vsi: the VSI with XDP rings being unmapped
2746+
*/
2747+
static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
2748+
{
2749+
int v_idx;
2750+
2751+
ice_for_each_q_vector(vsi, v_idx) {
2752+
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2753+
struct ice_tx_ring *ring;
2754+
2755+
ice_for_each_tx_ring(ring, q_vector->tx)
2756+
if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2757+
break;
2758+
2759+
/* restore the value of last node prior to XDP setup */
2760+
q_vector->tx.tx_ring = ring;
2761+
}
2762+
}
2763+
27432764
/**
27442765
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
27452766
* @vsi: VSI to bring up Tx rings used by XDP
@@ -2803,7 +2824,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
28032824
if (status) {
28042825
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
28052826
status);
2806-
goto clear_xdp_rings;
2827+
goto unmap_xdp_rings;
28072828
}
28082829

28092830
/* assign the prog only when it's not already present on VSI;
@@ -2819,6 +2840,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
28192840
ice_vsi_assign_bpf_prog(vsi, prog);
28202841

28212842
return 0;
2843+
unmap_xdp_rings:
2844+
ice_unmap_xdp_rings(vsi);
28222845
clear_xdp_rings:
28232846
ice_for_each_xdp_txq(vsi, i)
28242847
if (vsi->xdp_rings[i]) {
@@ -2835,6 +2858,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
28352858
mutex_unlock(&pf->avail_q_mutex);
28362859

28372860
devm_kfree(dev, vsi->xdp_rings);
2861+
vsi->xdp_rings = NULL;
2862+
28382863
return -ENOMEM;
28392864
}
28402865

@@ -2850,25 +2875,15 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
28502875
{
28512876
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
28522877
struct ice_pf *pf = vsi->back;
2853-
int i, v_idx;
2878+
int i;
28542879

28552880
/* q_vectors are freed in reset path so there's no point in detaching
28562881
* rings
28572882
*/
28582883
if (cfg_type == ICE_XDP_CFG_PART)
28592884
goto free_qmap;
28602885

2861-
ice_for_each_q_vector(vsi, v_idx) {
2862-
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2863-
struct ice_tx_ring *ring;
2864-
2865-
ice_for_each_tx_ring(ring, q_vector->tx)
2866-
if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2867-
break;
2868-
2869-
/* restore the value of last node prior to XDP setup */
2870-
q_vector->tx.tx_ring = ring;
2871-
}
2886+
ice_unmap_xdp_rings(vsi);
28722887

28732888
free_qmap:
28742889
mutex_lock(&pf->avail_q_mutex);
@@ -3013,11 +3028,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
30133028
xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
30143029
if (xdp_ring_err) {
30153030
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
3031+
goto resume_if;
30163032
} else {
30173033
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
30183034
ICE_XDP_CFG_FULL);
3019-
if (xdp_ring_err)
3035+
if (xdp_ring_err) {
30203036
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
3037+
goto resume_if;
3038+
}
30213039
}
30223040
xdp_features_set_redirect_target(vsi->netdev, true);
30233041
/* reallocate Rx queues that are used for zero-copy */
@@ -3035,6 +3053,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
30353053
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
30363054
}
30373055

3056+
resume_if:
30383057
if (if_running)
30393058
ret = ice_up(vsi);
30403059

0 commit comments

Comments
 (0)