@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
+	q_vector->tx.type = ICE_TX_CONTAINER;
+	q_vector->rx.type = ICE_RX_CONTAINER;

	if (vsi->type == ICE_VSI_VF)
		goto out;
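The two added lines tag each ring container with its traffic direction. A minimal sketch of what this implies on the struct side (the layout of struct ice_ring_container is assumed, not shown in this hunk; only the typed pointers and the new type field are suggested by the diff):

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

/* Assumed shape of struct ice_ring_container after the ice_ring split:
 * a union keeps the old single-pointer footprint while giving each
 * direction its own ring type, and @type records which member is live.
 */
struct ice_ring_container {
	union {
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
	};
	u16 itr_idx;			/* index of ITR register in the vector */
	enum ice_container_type type;	/* set once at q_vector allocation */
};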
@@ -146,7 +148,8 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
-	struct ice_ring *ring;
+	struct ice_tx_ring *tx_ring;
+	struct ice_rx_ring *rx_ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
@@ -156,10 +159,10 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
	}
	q_vector = vsi->q_vectors[v_idx];

-	ice_for_each_ring(ring, q_vector->tx)
-		ring->q_vector = NULL;
-	ice_for_each_ring(ring, q_vector->rx)
-		ring->q_vector = NULL;
+	ice_for_each_tx_ring(tx_ring, q_vector->tx)
+		tx_ring->q_vector = NULL;
+	ice_for_each_rx_ring(rx_ring, q_vector->rx)
+		rx_ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
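The old ice_for_each_ring() iterator gives way to per-direction variants. Given the typed head pointers and the rings' next links used elsewhere in this diff, the new helpers are presumably thin macros along these lines (a sketch, not the verbatim ice_txrx.h definitions):

/* walk a q_vector's Tx container: start at the typed head pointer
 * and follow each ring's singly linked next pointer
 */
#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)

#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)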
@@ -201,12 +204,12 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
}

/**
- * ice_calc_q_handle - calculate the queue handle
+ * ice_calc_txq_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
-static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

@@ -218,7 +221,7 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
}

/**
- * ice_eswitch_calc_q_handle
+ * ice_eswitch_calc_txq_handle
 * @ring: pointer to ring which unique index is needed
 *
 * To correctly work with many netdevs ring->q_index of Tx rings on switchdev
@@ -228,7 +231,7 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
 * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen,
 * because VSI is get from ring->vsi, so it has to be present in this VSI.
 */
-static u16 ice_eswitch_calc_q_handle(struct ice_ring *ring)
+static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	int i;
@@ -248,7 +251,7 @@ static u16 ice_eswitch_calc_q_handle(struct ice_ring *ring)
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 */
-static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
+static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;
@@ -270,7 +273,7 @@ static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;
@@ -282,7 +285,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

-	ice_set_cgd_num(tlan_ctx, ring);
+	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;
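ice_set_cgd_num() now takes the DCB traffic class directly rather than a ring pointer, so a single helper can serve both ring flavors without knowing either struct. A plausible before/after of the helper, assuming its body only ever consumed ring->dcb_tc (the helper lives outside this file and is not shown in the diff):

/* before (assumed): bound to the old unified ring struct */
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
	tlan_ctx->cgd_num = ring->dcb_tc;
}

/* after (assumed): takes the TC number itself */
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc)
{
	tlan_ctx->cgd_num = dcb_tc;
}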
@@ -339,7 +342,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
 *
 * Returns the offset value for ring into the data buffer.
 */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
@@ -355,7 +358,7 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	struct ice_vsi *vsi = ring->vsi;
@@ -466,7 +469,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 *
 * Return 0 on success and a negative value on error.
 */
-int ice_vsi_cfg_rxq(struct ice_ring *ring)
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	u16 num_bufs = ICE_DESC_UNUSED(ring);
@@ -687,33 +690,33 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
-		q_vector->tx.ring = NULL;
+		q_vector->tx.tx_ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
-			struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
-			tx_ring->next = q_vector->tx.ring;
-			q_vector->tx.ring = tx_ring;
+			tx_ring->next = q_vector->tx.tx_ring;
+			q_vector->tx.tx_ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
-		q_vector->rx.ring = NULL;
+		q_vector->rx.rx_ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
-			struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
-			rx_ring->next = q_vector->rx.ring;
-			q_vector->rx.ring = rx_ring;
+			rx_ring->next = q_vector->rx.rx_ring;
+			q_vector->rx.rx_ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
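Behavior is unchanged here: each ring is still pushed at the head of the container's singly linked list, so a walk via ->next visits q_ids in descending order; only the field names move to the typed pointers. A hypothetical consumer of the mapping, using the new iterator (ice_clean_tx_ring() stands in for any per-ring operation):

struct ice_tx_ring *tx_ring;

/* rings mapped above are visited newest-first (reverse q_id order) */
ice_for_each_tx_ring(tx_ring, q_vector->tx)
	ice_clean_tx_ring(tx_ring);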
@@ -738,7 +741,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
 * @qg_buf: queue group buffer
 */
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
@@ -774,12 +777,12 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
	 * TC into the VSI Tx ring
	 */
	if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
-		ring->q_handle = ice_eswitch_calc_q_handle(ring);
+		ring->q_handle = ice_eswitch_calc_txq_handle(ring);

		if (ring->q_handle == ICE_INVAL_Q_INDEX)
			return -ENODEV;
	} else {
-		ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+		ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
	}

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
@@ -904,7 +907,7 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-		     u16 rel_vmvf_num, struct ice_ring *ring,
+		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
@@ -961,7 +964,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 * are needed for stopping Tx queue
 */
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	u8 tc;
0 commit comments