@@ -124,13 +124,15 @@ struct flag_table {
124
124
125
125
/*
 * RSM instance allocation
 *   0 - User Fecn Handling
 *   1 - Vnic
 *   2 - AIP
 *   3 - Verbs
 */
#define RSM_INS_FECN  0
#define RSM_INS_VNIC  1
#define RSM_INS_AIP   2
#define RSM_INS_VERBS 3
134
136
135
137
/* Bit offset into the GUID which carries HFI id information */
136
138
#define GUID_HFI_INDEX_SHIFT 39
@@ -171,6 +173,25 @@ struct flag_table {
171
173
/* QPN[m+n:1] QW 1, OFFSET 1 */
172
174
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
173
175
176
/* RSM fields for AIP */
/* LRH.BTH above is reused for this rule */

/* BTH.DESTQP: QW 1, OFFSET 16 for match */
#define BTH_DESTQP_QW           1ull
#define BTH_DESTQP_BIT_OFFSET   16ull
/* Function-like macro: no space before '(' or the parameter is not bound */
#define BTH_DESTQP_OFFSET(off)  ((BTH_DESTQP_QW << QW_SHIFT) | (off))
#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
#define BTH_DESTQP_MASK         0xFFull
#define BTH_DESTQP_VALUE        0x81ull

/* DETH.SQPN: QW 3, Offset 56 for select */
/* NOTE(review): comment previously said QW 1; the define below uses 3 —
 * kept the code value and aligned the comment to it. Confirm against HAS.
 */
/* We use the 8 most significant Source QPN bits as entropy for AIP */
#define DETH_AIP_SQPN_QW 3ull
#define DETH_AIP_SQPN_BIT_OFFSET 56ull
#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
#define DETH_AIP_SQPN_SELECT_OFFSET \
	DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)
194
+
174
195
/* RSM fields for Vnic */
175
196
/* L2_TYPE: QW 0, OFFSET 61 - for match */
176
197
#define L2_TYPE_QW 0ull
@@ -14236,6 +14257,12 @@ static void complete_rsm_map_table(struct hfi1_devdata *dd,
14236
14257
}
14237
14258
}
14238
14259
14260
+ /* Is a receive side mapping rule */
14261
+ static bool has_rsm_rule (struct hfi1_devdata * dd , u8 rule_index )
14262
+ {
14263
+ return read_csr (dd , RCV_RSM_CFG + (8 * rule_index )) != 0 ;
14264
+ }
14265
+
14239
14266
/*
14240
14267
* Add a receive side mapping rule.
14241
14268
*/
@@ -14472,39 +14499,49 @@ static void init_fecn_handling(struct hfi1_devdata *dd,
14472
14499
rmt -> used += total_cnt ;
14473
14500
}
14474
14501
14475
- /* Initialize RSM for VNIC */
14476
- void hfi1_init_vnic_rsm (struct hfi1_devdata * dd )
14502
+ static inline bool hfi1_is_rmt_full (int start , int spare )
14503
+ {
14504
+ return (start + spare ) > NUM_MAP_ENTRIES ;
14505
+ }
14506
+
14507
+ static bool hfi1_netdev_update_rmt (struct hfi1_devdata * dd )
14477
14508
{
14478
14509
u8 i , j ;
14479
14510
u8 ctx_id = 0 ;
14480
14511
u64 reg ;
14481
14512
u32 regoff ;
14482
- struct rsm_rule_data rrd ;
14513
+ int rmt_start = dd -> vnic . rmt_start ;
14483
14514
14484
- if (hfi1_vnic_is_rsm_full (dd , NUM_VNIC_MAP_ENTRIES )) {
14485
- dd_dev_err (dd , "Vnic RSM disabled, rmt entries used = %d\n" ,
14486
- dd -> vnic .rmt_start );
14487
- return ;
14515
+ /* We already have contexts mapped in RMT */
14516
+ if (has_rsm_rule (dd , RSM_INS_VNIC ) || has_rsm_rule (dd , RSM_INS_AIP )) {
14517
+ dd_dev_info (dd , "Contexts are already mapped in RMT\n" );
14518
+ return true;
14519
+ }
14520
+
14521
+ if (hfi1_is_rmt_full (rmt_start , NUM_VNIC_MAP_ENTRIES )) {
14522
+ dd_dev_err (dd , "Not enought RMT entries used = %d\n" ,
14523
+ rmt_start );
14524
+ return false;
14488
14525
}
14489
14526
14490
- dev_dbg (& (dd )-> pcidev -> dev , "Vnic rsm start = %d, end %d\n" ,
14491
- dd -> vnic . rmt_start ,
14492
- dd -> vnic . rmt_start + NUM_VNIC_MAP_ENTRIES );
14527
+ dev_dbg (& (dd )-> pcidev -> dev , "RMT start = %d, end %d\n" ,
14528
+ rmt_start ,
14529
+ rmt_start + NUM_VNIC_MAP_ENTRIES );
14493
14530
14494
14531
/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14495
- regoff = RCV_RSM_MAP_TABLE + (dd -> vnic . rmt_start / 8 ) * 8 ;
14532
+ regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8 ) * 8 ;
14496
14533
reg = read_csr (dd , regoff );
14497
14534
for (i = 0 ; i < NUM_VNIC_MAP_ENTRIES ; i ++ ) {
14498
- /* Update map register with vnic context */
14499
- j = (dd -> vnic . rmt_start + i ) % 8 ;
14535
+ /* Update map register with netdev context */
14536
+ j = (rmt_start + i ) % 8 ;
14500
14537
reg &= ~(0xffllu << (j * 8 ));
14501
14538
reg |= (u64 )dd -> vnic .ctxt [ctx_id ++ ]-> ctxt << (j * 8 );
14502
- /* Wrap up vnic ctx index */
14539
+ /* Wrap up netdev ctx index */
14503
14540
ctx_id %= dd -> vnic .num_ctxt ;
14504
14541
/* Write back map register */
14505
14542
if (j == 7 || ((i + 1 ) == NUM_VNIC_MAP_ENTRIES )) {
14506
14543
dev_dbg (& (dd )-> pcidev -> dev ,
14507
- "Vnic rsm map reg [%d] =0x%llx\n" ,
14544
+ "RMT [%d] =0x%llx\n" ,
14508
14545
regoff - RCV_RSM_MAP_TABLE , reg );
14509
14546
14510
14547
write_csr (dd , regoff , reg );
@@ -14514,35 +14551,83 @@ void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14514
14551
}
14515
14552
}
14516
14553
14517
- /* Add rule for vnic */
14518
- rrd .offset = dd -> vnic .rmt_start ;
14519
- rrd .pkt_type = 4 ;
14520
- /* Match 16B packets */
14521
- rrd .field1_off = L2_TYPE_MATCH_OFFSET ;
14522
- rrd .mask1 = L2_TYPE_MASK ;
14523
- rrd .value1 = L2_16B_VALUE ;
14524
- /* Match ETH L4 packets */
14525
- rrd .field2_off = L4_TYPE_MATCH_OFFSET ;
14526
- rrd .mask2 = L4_16B_TYPE_MASK ;
14527
- rrd .value2 = L4_16B_ETH_VALUE ;
14528
- /* Calc context from veswid and entropy */
14529
- rrd .index1_off = L4_16B_HDR_VESWID_OFFSET ;
14530
- rrd .index1_width = ilog2 (NUM_VNIC_MAP_ENTRIES );
14531
- rrd .index2_off = L2_16B_ENTROPY_OFFSET ;
14532
- rrd .index2_width = ilog2 (NUM_VNIC_MAP_ENTRIES );
14533
- add_rsm_rule (dd , RSM_INS_VNIC , & rrd );
14534
-
14535
- /* Enable RSM if not already enabled */
14554
+ return true;
14555
+ }
14556
+
14557
+ static void hfi1_enable_rsm_rule (struct hfi1_devdata * dd ,
14558
+ int rule , struct rsm_rule_data * rrd )
14559
+ {
14560
+ if (!hfi1_netdev_update_rmt (dd )) {
14561
+ dd_dev_err (dd , "Failed to update RMT for RSM%d rule\n" , rule );
14562
+ return ;
14563
+ }
14564
+
14565
+ add_rsm_rule (dd , rule , rrd );
14536
14566
add_rcvctrl (dd , RCV_CTRL_RCV_RSM_ENABLE_SMASK );
14537
14567
}
14538
14568
14569
+ void hfi1_init_aip_rsm (struct hfi1_devdata * dd )
14570
+ {
14571
+ /*
14572
+ * go through with the initialisation only if this rule actually doesn't
14573
+ * exist yet
14574
+ */
14575
+ if (atomic_fetch_inc (& dd -> ipoib_rsm_usr_num ) == 0 ) {
14576
+ struct rsm_rule_data rrd = {
14577
+ .offset = dd -> vnic .rmt_start ,
14578
+ .pkt_type = IB_PACKET_TYPE ,
14579
+ .field1_off = LRH_BTH_MATCH_OFFSET ,
14580
+ .mask1 = LRH_BTH_MASK ,
14581
+ .value1 = LRH_BTH_VALUE ,
14582
+ .field2_off = BTH_DESTQP_MATCH_OFFSET ,
14583
+ .mask2 = BTH_DESTQP_MASK ,
14584
+ .value2 = BTH_DESTQP_VALUE ,
14585
+ .index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
14586
+ ilog2 (NUM_VNIC_MAP_ENTRIES ),
14587
+ .index1_width = ilog2 (NUM_VNIC_MAP_ENTRIES ),
14588
+ .index2_off = DETH_AIP_SQPN_SELECT_OFFSET ,
14589
+ .index2_width = ilog2 (NUM_VNIC_MAP_ENTRIES )
14590
+ };
14591
+
14592
+ hfi1_enable_rsm_rule (dd , RSM_INS_AIP , & rrd );
14593
+ }
14594
+ }
14595
+
14596
+ /* Initialize RSM for VNIC */
14597
+ void hfi1_init_vnic_rsm (struct hfi1_devdata * dd )
14598
+ {
14599
+ struct rsm_rule_data rrd = {
14600
+ /* Add rule for vnic */
14601
+ .offset = dd -> vnic .rmt_start ,
14602
+ .pkt_type = 4 ,
14603
+ /* Match 16B packets */
14604
+ .field1_off = L2_TYPE_MATCH_OFFSET ,
14605
+ .mask1 = L2_TYPE_MASK ,
14606
+ .value1 = L2_16B_VALUE ,
14607
+ /* Match ETH L4 packets */
14608
+ .field2_off = L4_TYPE_MATCH_OFFSET ,
14609
+ .mask2 = L4_16B_TYPE_MASK ,
14610
+ .value2 = L4_16B_ETH_VALUE ,
14611
+ /* Calc context from veswid and entropy */
14612
+ .index1_off = L4_16B_HDR_VESWID_OFFSET ,
14613
+ .index1_width = ilog2 (NUM_VNIC_MAP_ENTRIES ),
14614
+ .index2_off = L2_16B_ENTROPY_OFFSET ,
14615
+ .index2_width = ilog2 (NUM_VNIC_MAP_ENTRIES )
14616
+ };
14617
+
14618
+ hfi1_enable_rsm_rule (dd , RSM_INS_VNIC , & rrd );
14619
+ }
14620
+
14539
14621
void hfi1_deinit_vnic_rsm (struct hfi1_devdata * dd )
14540
14622
{
14541
14623
clear_rsm_rule (dd , RSM_INS_VNIC );
14624
+ }
14542
14625
14543
- /* Disable RSM if used only by vnic */
14544
- if (dd -> vnic .rmt_start == 0 )
14545
- clear_rcvctrl (dd , RCV_CTRL_RCV_RSM_ENABLE_SMASK );
14626
+ void hfi1_deinit_aip_rsm (struct hfi1_devdata * dd )
14627
+ {
14628
+ /* only actually clear the rule if it's the last user asking to do so */
14629
+ if (atomic_fetch_add_unless (& dd -> ipoib_rsm_usr_num , -1 , 0 ) == 1 )
14630
+ clear_rsm_rule (dd , RSM_INS_AIP );
14546
14631
}
14547
14632
14548
14633
static int init_rxe (struct hfi1_devdata * dd )
0 commit comments