@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
  */
 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 {
-	u16 offset = 0, qmap = 0, numq_tc;
-	u16 pow = 0, max_rss = 0, qcount;
+	u16 offset = 0, qmap = 0, tx_count = 0;
 	u16 qcount_tx = vsi->alloc_txq;
 	u16 qcount_rx = vsi->alloc_rxq;
+	u16 tx_numq_tc, rx_numq_tc;
+	u16 pow = 0, max_rss = 0;
 	bool ena_tc0 = false;
+	u8 netdev_tc = 0;
 	int i;
 
 	/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 		vsi->tc_cfg.ena_tc |= 1;
 	}
 
-	numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+	if (!rx_numq_tc)
+		rx_numq_tc = 1;
+	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+	if (!tx_numq_tc)
+		tx_numq_tc = 1;
 
 	/* TC mapping is a function of the number of Rx queues assigned to the
 	 * VSI for each traffic class and the offset of these queues.
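The replacement in this hunk is plain integer division clamped to a minimum of one queue per TC, so a VSI with fewer queues than enabled TCs still ends up with a usable map. A standalone sketch of the arithmetic, using hypothetical queue counts rather than values taken from the commit:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical VSI: 16 Rx queues, 8 Tx queues, 3 enabled TCs */
	uint16_t qcount_rx = 16, qcount_tx = 8, numtc = 3;
	uint16_t rx_numq_tc = qcount_rx / numtc;	/* 16 / 3 = 5 */
	uint16_t tx_numq_tc = qcount_tx / numtc;	/*  8 / 3 = 2 */

	/* clamp to 1, mirroring the !rx_numq_tc / !tx_numq_tc checks */
	if (!rx_numq_tc)
		rx_numq_tc = 1;
	if (!tx_numq_tc)
		tx_numq_tc = 1;

	printf("Rx per TC: %u, Tx per TC: %u\n",
	       (unsigned int)rx_numq_tc, (unsigned int)tx_numq_tc);
	return 0;
}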
@@ -808,45 +815,50 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	 * Setup number and offset of Rx queues for all TCs for the VSI
 	 */
 
-	qcount = numq_tc;
+	qcount_rx = rx_numq_tc;
+
 	/* qcount will change if RSS is enabled */
 	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
 		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
 			if (vsi->type == ICE_VSI_PF)
 				max_rss = ICE_MAX_LG_RSS_QS;
 			else
 				max_rss = ICE_MAX_SMALL_RSS_QS;
-			qcount = min_t(int, numq_tc, max_rss);
-			qcount = min_t(int, qcount, vsi->rss_size);
+			qcount_rx = min_t(int, rx_numq_tc, max_rss);
+			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
 		}
 	}
 
 	/* find the (rounded up) power-of-2 of qcount */
-	pow = order_base_2(qcount);
+	pow = order_base_2(qcount_rx);
 
 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
 			/* TC is not enabled */
 			vsi->tc_cfg.tc_info[i].qoffset = 0;
-			vsi->tc_cfg.tc_info[i].qcount = 1;
+			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
 			ctxt->info.tc_mapping[i] = 0;
 			continue;
 		}
 
 		/* TC is enabled */
 		vsi->tc_cfg.tc_info[i].qoffset = offset;
-		vsi->tc_cfg.tc_info[i].qcount = qcount;
+		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
 
 		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
 			ICE_AQ_VSI_TC_Q_OFFSET_M) |
 		       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
 			ICE_AQ_VSI_TC_Q_NUM_M);
-		offset += qcount;
+		offset += qcount_rx;
+		tx_count += tx_numq_tc;
 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
 	}
-
-	vsi->num_txq = qcount_tx;
 	vsi->num_rxq = offset;
+	vsi->num_txq = tx_count;
 
 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
 		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
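Each tc_mapping entry written in the loop above packs two fields into one 16-bit word: the TC's first queue (offset) and its queue count expressed as a power-of-2 exponent from order_base_2(), which rounds up (5 queues yields an exponent of 3, i.e. 8 queue slots). A minimal userspace sketch of that packing follows; the shift and mask values are assumptions modeled on the driver's ICE_AQ_VSI_TC_Q_OFFSET and ICE_AQ_VSI_TC_Q_NUM macros, not copied from it:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: queue offset in bits 0-10, power-of-2 count in bits 11-14
 * (illustrative stand-ins for the admin-queue shift/mask macros).
 */
#define Q_OFFSET_S	0
#define Q_OFFSET_M	(0x7FF << Q_OFFSET_S)
#define Q_NUM_S		11
#define Q_NUM_M		(0xF << Q_NUM_S)

/* round-up log2, behaving like the kernel's order_base_2() */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	uint16_t offset = 8;		/* first Rx queue owned by this TC */
	uint16_t qcount_rx = 5;		/* Rx queues assigned to this TC */
	uint16_t pow = order_base_2(qcount_rx);		/* 5 -> 3 */
	uint16_t qmap = ((offset << Q_OFFSET_S) & Q_OFFSET_M) |
			((pow << Q_NUM_S) & Q_NUM_M);

	printf("qmap = 0x%04x\n", (unsigned int)qmap);	/* 0x1808 */
	return 0;
}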
@@ -1611,10 +1623,10 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 	struct ice_aqc_add_tx_qgrp *qg_buf;
 	struct ice_aqc_add_txqs_perq *txq;
 	struct ice_pf *pf = vsi->back;
+	u8 num_q_grps, q_idx = 0;
 	enum ice_status status;
 	u16 buf_len, i, pf_q;
 	int err = 0, tc = 0;
-	u8 num_q_grps;
 
 	buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
 	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
@@ -1628,38 +1640,49 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 	qg_buf->num_txqs = 1;
 	num_q_grps = 1;
 
-	/* set up and configure the Tx queues */
-	ice_for_each_txq(vsi, i) {
-		struct ice_tlan_ctx tlan_ctx = { 0 };
+	/* set up and configure the Tx queues for each enabled TC */
+	for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+			break;
 
-		pf_q = vsi->txq_map[i];
-		ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
-		/* copy context contents into the qg_buf */
-		qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-		ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
-			    ice_tlan_ctx_info);
+		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+			struct ice_tlan_ctx tlan_ctx = { 0 };
+
+			pf_q = vsi->txq_map[q_idx];
+			ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+					 pf_q);
+			/* copy context contents into the qg_buf */
+			qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+			ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+				    ice_tlan_ctx_info);
+
+			/* init queue specific tail reg. It is referred as
+			 * transmit comm scheduler queue doorbell.
+			 */
+			vsi->tx_rings[q_idx]->tail =
+				pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+			status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+						 num_q_grps, qg_buf, buf_len,
+						 NULL);
+			if (status) {
+				dev_err(&vsi->back->pdev->dev,
+					"Failed to set LAN Tx queue context, error: %d\n",
+					status);
+				err = -ENODEV;
+				goto err_cfg_txqs;
+			}
 
-		/* init queue specific tail reg. It is referred as transmit
-		 * comm scheduler queue doorbell.
-		 */
-		vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
-					 num_q_grps, qg_buf, buf_len, NULL);
-		if (status) {
-			dev_err(&vsi->back->pdev->dev,
-				"Failed to set LAN Tx queue context, error: %d\n",
-				status);
-			err = -ENODEV;
-			goto err_cfg_txqs;
-		}
+			/* Add Tx Queue TEID into the VSI Tx ring from the
+			 * response. This will complete configuring and
+			 * enabling the queue.
+			 */
+			txq = &qg_buf->txqs[0];
+			if (pf_q == le16_to_cpu(txq->txq_id))
+				vsi->tx_rings[q_idx]->txq_teid =
+					le32_to_cpu(txq->q_teid);
 
-		/* Add Tx Queue TEID into the VSI Tx ring from the response
-		 * This will complete configuring and enabling the queue.
-		 */
-		txq = &qg_buf->txqs[0];
-		if (pf_q == le16_to_cpu(txq->txq_id))
-			vsi->tx_rings[i]->txq_teid =
-				le32_to_cpu(txq->q_teid);
+			q_idx++;
+		}
 	}
 err_cfg_txqs:
 	devm_kfree(&pf->pdev->dev, qg_buf);
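The rewritten loop replaces the flat ice_for_each_txq() walk with a TC-major traversal: q_idx advances monotonically through txq_map while each queue is enabled against its own TC's scheduler node, and the break relies on enabled TCs being contiguous from TC0. A minimal sketch of that traversal shape, with the ice structures elided and a hypothetical two-field config struct standing in for tc_cfg:

#include <stdio.h>

#define MAX_TC 8

struct tc_cfg {
	unsigned int ena_tc;		/* bit i set when TC i is enabled */
	unsigned int qcount_tx[MAX_TC];	/* Tx queues owned by each TC */
};

int main(void)
{
	/* hypothetical config: TC0 and TC1 enabled, two Tx queues each */
	struct tc_cfg cfg = { .ena_tc = 0x3, .qcount_tx = { 2, 2 } };
	unsigned int q_idx = 0;

	for (unsigned int tc = 0; tc < MAX_TC; tc++) {
		if (!(cfg.ena_tc & (1U << tc)))
			break;	/* enabled TCs are contiguous from TC0 */
		for (unsigned int i = 0; i < cfg.qcount_tx[tc]; i++) {
			/* each queue is configured against its TC node */
			printf("queue %u -> TC %u\n", q_idx, tc);
			q_idx++;
		}
	}
	return 0;
}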
@@ -2057,6 +2080,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	/* set RSS capabilities */
 	ice_vsi_set_rss_params(vsi);
 
+	/* set tc configuration */
+	ice_vsi_set_tc_cfg(vsi);
+
 	/* create the VSI */
 	ret = ice_vsi_init(vsi);
 	if (ret)
@@ -2120,11 +2146,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		goto unroll_vsi_init;
 	}
 
-	ice_vsi_set_tc_cfg(vsi);
-
 	/* configure VSI nodes based on number of queues and TC's */
 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
+		max_txqs[i] = pf->num_lan_tx;
 
 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 			      max_txqs);
@@ -2520,11 +2544,13 @@ int ice_vsi_release(struct ice_vsi *vsi)
 int ice_vsi_rebuild(struct ice_vsi *vsi)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	struct ice_pf *pf;
 	int ret, i;
 
 	if (!vsi)
 		return -EINVAL;
 
+	pf = vsi->back;
 	ice_vsi_free_q_vectors(vsi);
 	ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
 	ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2534,6 +2560,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 	ice_vsi_free_arrays(vsi, false);
 	ice_dev_onetime_setup(&vsi->back->hw);
 	ice_vsi_set_num_qs(vsi);
+	ice_vsi_set_tc_cfg(vsi);
 
 	/* Initialize VSI struct elements and create VSI in FW */
 	ret = ice_vsi_init(vsi);
@@ -2580,11 +2607,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 		break;
 	}
 
-	ice_vsi_set_tc_cfg(vsi);
-
 	/* configure VSI nodes based on number of queues and TC's */
 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
+		max_txqs[i] = pf->num_lan_tx;
 
 	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 			      max_txqs);