
Commit c5a2a4a

Usha Ketineni authored and Jeff Kirsher committed
ice: Fix to make VLAN priority tagged traffic to appear on all TCs
This patch includes the below changes to resolve the issue of ETS bandwidth
shaping not working:

1. Tx queue allocation is now accounted for based on the enabled TCs in
   ice_vsi_setup_q_map(), and the Tx queues on those TCs are enabled via
   ice_vsi_cfg_txqs().
2. Get the netdev TC number mapped to each user priority and set the
   priority-to-TC mapping for the VSI.

Signed-off-by: Usha Ketineni <[email protected]>
Signed-off-by: Anirudh Venkataramanan <[email protected]>
Tested-by: Andrew Bowers <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
1 parent 99fc105 commit c5a2a4a
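
For reference, the per-TC queue split that ice_vsi_setup_q_map() performs after this change can be illustrated with the standalone C sketch below. It is a simplified model of the arithmetic visible in the diff, not driver code: the names tc_queue_split, split_queues_per_tc and roundup_pow2 are invented for the example, and only the divide-and-clamp of alloc_txq/alloc_rxq across the enabled TCs and the power-of-2 rounding of the per-TC Rx count (order_base_2() in the patch) are meant to mirror the actual implementation.

#include <stdio.h>

/* Hypothetical container mirroring the qcount_tx/qcount_rx fields that this
 * commit adds to struct ice_tc_info.
 */
struct tc_queue_split {
        unsigned int qoffset;   /* first Rx queue index for this TC */
        unsigned int qcount_rx; /* Rx queues assigned to this TC */
        unsigned int qcount_tx; /* Tx queues assigned to this TC */
};

/* Round n up to the next power of two; the patch uses order_base_2() to get
 * the equivalent exponent for the qmap encoding.
 */
static unsigned int roundup_pow2(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

/* Split the VSI's allocated queues evenly across the enabled TCs, clamping to
 * at least one queue per TC, the way rx_numq_tc/tx_numq_tc are computed in
 * ice_vsi_setup_q_map().
 */
static unsigned int split_queues_per_tc(unsigned int alloc_txq,
                                        unsigned int alloc_rxq,
                                        unsigned int numtc,
                                        struct tc_queue_split *out)
{
        unsigned int tx_per_tc = alloc_txq / numtc ? alloc_txq / numtc : 1;
        unsigned int rx_per_tc = alloc_rxq / numtc ? alloc_rxq / numtc : 1;
        unsigned int offset = 0, tx_count = 0, tc;

        for (tc = 0; tc < numtc; tc++) {
                out[tc].qoffset = offset;
                out[tc].qcount_rx = rx_per_tc;
                out[tc].qcount_tx = tx_per_tc;
                offset += rx_per_tc;   /* Rx queues laid out back to back */
                tx_count += tx_per_tc; /* summed Tx count per enabled TC */
        }
        printf("per-TC Rx qmap count (power of 2): %u\n",
               roundup_pow2(rx_per_tc));
        return tx_count;
}

int main(void)
{
        struct tc_queue_split split[4];
        unsigned int num_txq;

        /* e.g. 16 Tx / 16 Rx queues spread over 4 enabled TCs -> 4 + 4 per TC */
        num_txq = split_queues_per_tc(16, 16, 4, split);
        printf("total Tx queues across TCs: %u\n", num_txq);
        return 0;
}

With these example inputs each TC gets Rx offsets 0, 4, 8 and 12 with a power-of-2 count of 4, and the summed per-TC Tx count (16) corresponds to the tx_count value the patched ice_vsi_setup_q_map() stores in vsi->num_txq.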

5 files changed, 81 insertions(+), 51 deletions(-)

drivers/net/ethernet/intel/ice/ice.h

Lines changed: 3 additions & 1 deletion
@@ -112,7 +112,9 @@ extern const char ice_drv_ver[];
 
 struct ice_tc_info {
         u16 qoffset;
-        u16 qcount;
+        u16 qcount_tx;
+        u16 qcount_rx;
+        u8 netdev_tc;
 };
 
 struct ice_tc_cfg {

drivers/net/ethernet/intel/ice/ice_lib.c

Lines changed: 73 additions & 48 deletions
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
  */
 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 {
-        u16 offset = 0, qmap = 0, numq_tc;
-        u16 pow = 0, max_rss = 0, qcount;
+        u16 offset = 0, qmap = 0, tx_count = 0;
         u16 qcount_tx = vsi->alloc_txq;
         u16 qcount_rx = vsi->alloc_rxq;
+        u16 tx_numq_tc, rx_numq_tc;
+        u16 pow = 0, max_rss = 0;
         bool ena_tc0 = false;
+        u8 netdev_tc = 0;
         int i;
 
         /* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
                 vsi->tc_cfg.ena_tc |= 1;
         }
 
-        numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+        rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+        if (!rx_numq_tc)
+                rx_numq_tc = 1;
+        tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+        if (!tx_numq_tc)
+                tx_numq_tc = 1;
 
         /* TC mapping is a function of the number of Rx queues assigned to the
          * VSI for each traffic class and the offset of these queues.
@@ -808,45 +815,50 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
          * Setup number and offset of Rx queues for all TCs for the VSI
          */
 
-        qcount = numq_tc;
+        qcount_rx = rx_numq_tc;
+
         /* qcount will change if RSS is enabled */
         if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
                 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
                         if (vsi->type == ICE_VSI_PF)
                                 max_rss = ICE_MAX_LG_RSS_QS;
                         else
                                 max_rss = ICE_MAX_SMALL_RSS_QS;
-                        qcount = min_t(int, numq_tc, max_rss);
-                        qcount = min_t(int, qcount, vsi->rss_size);
+                        qcount_rx = min_t(int, rx_numq_tc, max_rss);
+                        qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
                 }
         }
 
         /* find the (rounded up) power-of-2 of qcount */
-        pow = order_base_2(qcount);
+        pow = order_base_2(qcount_rx);
 
         for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
                 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
                         /* TC is not enabled */
                         vsi->tc_cfg.tc_info[i].qoffset = 0;
-                        vsi->tc_cfg.tc_info[i].qcount = 1;
+                        vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+                        vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+                        vsi->tc_cfg.tc_info[i].netdev_tc = 0;
                         ctxt->info.tc_mapping[i] = 0;
                         continue;
                 }
 
                 /* TC is enabled */
                 vsi->tc_cfg.tc_info[i].qoffset = offset;
-                vsi->tc_cfg.tc_info[i].qcount = qcount;
+                vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+                vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+                vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
 
                 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
                         ICE_AQ_VSI_TC_Q_OFFSET_M) |
                         ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
                          ICE_AQ_VSI_TC_Q_NUM_M);
-                offset += qcount;
+                offset += qcount_rx;
+                tx_count += tx_numq_tc;
                 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
         }
-
-        vsi->num_txq = qcount_tx;
         vsi->num_rxq = offset;
+        vsi->num_txq = tx_count;
 
         if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
                 dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1611,10 +1623,10 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
         struct ice_aqc_add_tx_qgrp *qg_buf;
         struct ice_aqc_add_txqs_perq *txq;
         struct ice_pf *pf = vsi->back;
+        u8 num_q_grps, q_idx = 0;
         enum ice_status status;
         u16 buf_len, i, pf_q;
         int err = 0, tc = 0;
-        u8 num_q_grps;
 
         buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
         qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
@@ -1628,38 +1640,49 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
         qg_buf->num_txqs = 1;
         num_q_grps = 1;
 
-        /* set up and configure the Tx queues */
-        ice_for_each_txq(vsi, i) {
-                struct ice_tlan_ctx tlan_ctx = { 0 };
+        /* set up and configure the Tx queues for each enabled TC */
+        for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+                if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+                        break;
 
-                pf_q = vsi->txq_map[i];
-                ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
-                /* copy context contents into the qg_buf */
-                qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-                ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
-                            ice_tlan_ctx_info);
+                for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+                        struct ice_tlan_ctx tlan_ctx = { 0 };
+
+                        pf_q = vsi->txq_map[q_idx];
+                        ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+                                         pf_q);
+                        /* copy context contents into the qg_buf */
+                        qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+                        ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+                                    ice_tlan_ctx_info);
+
+                        /* init queue specific tail reg. It is referred as
+                         * transmit comm scheduler queue doorbell.
+                         */
+                        vsi->tx_rings[q_idx]->tail =
+                                pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+                        status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+                                                 num_q_grps, qg_buf, buf_len,
+                                                 NULL);
+                        if (status) {
+                                dev_err(&vsi->back->pdev->dev,
+                                        "Failed to set LAN Tx queue context, error: %d\n",
+                                        status);
+                                err = -ENODEV;
+                                goto err_cfg_txqs;
+                        }
 
-                /* init queue specific tail reg. It is referred as transmit
-                 * comm scheduler queue doorbell.
-                 */
-                vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-                status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
-                                         num_q_grps, qg_buf, buf_len, NULL);
-                if (status) {
-                        dev_err(&vsi->back->pdev->dev,
-                                "Failed to set LAN Tx queue context, error: %d\n",
-                                status);
-                        err = -ENODEV;
-                        goto err_cfg_txqs;
-                }
+                        /* Add Tx Queue TEID into the VSI Tx ring from the
+                         * response. This will complete configuring and
+                         * enabling the queue.
+                         */
+                        txq = &qg_buf->txqs[0];
+                        if (pf_q == le16_to_cpu(txq->txq_id))
+                                vsi->tx_rings[q_idx]->txq_teid =
+                                        le32_to_cpu(txq->q_teid);
 
-                /* Add Tx Queue TEID into the VSI Tx ring from the response
-                 * This will complete configuring and enabling the queue.
-                 */
-                txq = &qg_buf->txqs[0];
-                if (pf_q == le16_to_cpu(txq->txq_id))
-                        vsi->tx_rings[i]->txq_teid =
-                                le32_to_cpu(txq->q_teid);
+                        q_idx++;
+                }
         }
 err_cfg_txqs:
         devm_kfree(&pf->pdev->dev, qg_buf);
@@ -2057,6 +2080,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
         /* set RSS capabilities */
         ice_vsi_set_rss_params(vsi);
 
+        /* set tc configuration */
+        ice_vsi_set_tc_cfg(vsi);
+
         /* create the VSI */
         ret = ice_vsi_init(vsi);
         if (ret)
@@ -2120,11 +2146,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
                 goto unroll_vsi_init;
         }
 
-        ice_vsi_set_tc_cfg(vsi);
-
         /* configure VSI nodes based on number of queues and TC's */
         for (i = 0; i < vsi->tc_cfg.numtc; i++)
-                max_txqs[i] = vsi->num_txq;
+                max_txqs[i] = pf->num_lan_tx;
 
         ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
                               max_txqs);
@@ -2520,11 +2544,13 @@ int ice_vsi_release(struct ice_vsi *vsi)
 int ice_vsi_rebuild(struct ice_vsi *vsi)
 {
         u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+        struct ice_pf *pf;
         int ret, i;
 
         if (!vsi)
                 return -EINVAL;
 
+        pf = vsi->back;
         ice_vsi_free_q_vectors(vsi);
         ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
         ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2534,6 +2560,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
         ice_vsi_free_arrays(vsi, false);
         ice_dev_onetime_setup(&vsi->back->hw);
         ice_vsi_set_num_qs(vsi);
+        ice_vsi_set_tc_cfg(vsi);
 
         /* Initialize VSI struct elements and create VSI in FW */
         ret = ice_vsi_init(vsi);
@@ -2580,11 +2607,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
                 break;
         }
 
-        ice_vsi_set_tc_cfg(vsi);
-
         /* configure VSI nodes based on number of queues and TC's */
         for (i = 0; i < vsi->tc_cfg.numtc; i++)
-                max_txqs[i] = vsi->num_txq;
+                max_txqs[i] = pf->num_lan_tx;
 
         ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
                               max_txqs);

drivers/net/ethernet/intel/ice/ice_main.c

Lines changed: 3 additions & 1 deletion
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
         /* disable the VSIs and their queues that are not already DOWN */
         ice_pf_dis_all_vsi(pf);
 
+        if (hw->port_info)
+                ice_sched_clear_port(hw->port_info);
+
         ice_shutdown_all_ctrlq(hw);
 
         set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -2543,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
                 if (err)
                         return err;
         }
-
         err = ice_vsi_cfg_txqs(vsi);
         if (!err)
                 err = ice_vsi_cfg_rxqs(vsi);

drivers/net/ethernet/intel/ice/ice_sched.c

Lines changed: 1 addition & 1 deletion
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
  *
  * Cleanup scheduling elements from SW DB
  */
-static void ice_sched_clear_port(struct ice_port_info *pi)
+void ice_sched_clear_port(struct ice_port_info *pi)
 {
         if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
                 return;

drivers/net/ethernet/intel/ice/ice_sched.h

Lines changed: 1 addition & 0 deletions
@@ -26,6 +26,7 @@ struct ice_sched_agg_info {
 /* FW AQ command calls */
 enum ice_status ice_sched_init_port(struct ice_port_info *pi);
 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_clear_port(struct ice_port_info *pi);
 void ice_sched_cleanup_all(struct ice_hw *hw);
 struct ice_sched_node *
 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
