@@ -39,6 +39,7 @@
 #include "eswitch.h"
 #include "en.h"
 #include "fs_core.h"
+#include "lib/devcom.h"
 
 enum {
 	FDB_FAST_PATH = 0,
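The new "lib/devcom.h" include pulls in the devcom (inter-device communication) helpers used at the bottom of this patch to pair the e-switch instances of devices that report the merged_eswitch capability.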
@@ -541,6 +542,98 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 	mlx5_del_flow_rules(rule);
 }
 
+static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
+				  struct mlx5_flow_spec *spec,
+				  struct mlx5_flow_destination *dest)
+{
+	void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				  misc_parameters);
+
+	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
+		 MLX5_CAP_GEN(peer_dev, vhca_id));
+
+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+			    misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+			 source_eswitch_owner_vhca_id);
+
+	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest->vport.num = 0;
+	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
+	dest->vport.vhca_id_valid = 1;
+}
+
+static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+				       struct mlx5_core_dev *peer_dev)
+{
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_handle **flows;
+	struct mlx5_flow_handle *flow;
+	struct mlx5_flow_spec *spec;
+	/* total vports is the same for both e-switches */
+	int nvports = esw->total_vports;
+	void *misc;
+	int err, i;
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec)
+		return -ENOMEM;
+
+	peer_miss_rules_setup(peer_dev, spec, &dest);
+
+	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
+	if (!flows) {
+		err = -ENOMEM;
+		goto alloc_flows_err;
+	}
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+			    misc_parameters);
+
+	for (i = 1; i < nvports; i++) {
+		MLX5_SET(fte_match_set_misc, misc, source_port, i);
+		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+					   spec, &flow_act, &dest, 1);
+		if (IS_ERR(flow)) {
+			err = PTR_ERR(flow);
+			esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
+			goto add_flow_err;
+		}
+		flows[i] = flow;
+	}
+
+	esw->fdb_table.offloads.peer_miss_rules = flows;
+
+	kvfree(spec);
+	return 0;
+
+add_flow_err:
+	for (i--; i > 0; i--)
+		mlx5_del_flow_rules(flows[i]);
+	kvfree(flows);
+alloc_flows_err:
+	kvfree(spec);
+	return err;
+}
+
+static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
+{
+	struct mlx5_flow_handle **flows;
+	int i;
+
+	flows = esw->fdb_table.offloads.peer_miss_rules;
+
+	for (i = 1; i < esw->total_vports; i++)
+		mlx5_del_flow_rules(flows[i]);
+
+	kvfree(flows);
+}
+
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 {
 	struct mlx5_flow_act flow_act = {0};
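These two helpers implement the peer-miss scheme: for every peer vport except vport 0 (the loop starts at index 1, so flows[0] stays unused), a slow-path FDB rule matches the packet's source_port together with source_eswitch_owner_vhca_id and forwards it to vport 0 of the peer device, letting a miss on one e-switch reach a representor owned by the other. The handles are parked in esw->fdb_table.offloads.peer_miss_rules, a field this hunk assumes was added to the offloads FDB state in eswitch.h (that header change is not shown in this diff). A minimal sketch of the assumed struct:

	/* Sketch only: assumed shape of the offloads FDB state in eswitch.h.
	 * Field names are inferred from the esw->fdb_table.offloads.*
	 * accesses in this patch; this is not the verbatim header.
	 */
	struct offloads_fdb {
		struct mlx5_flow_table	*slow_fdb;
		struct mlx5_flow_group	*send_to_vport_grp;
		struct mlx5_flow_group	*peer_miss_grp;    /* new: group for the rules above */
		struct mlx5_flow_handle	**peer_miss_rules; /* new: one handle per peer vport */
		struct mlx5_flow_group	*miss_grp;
		struct mlx5_flow_handle	*miss_rule_uni;
		struct mlx5_flow_handle	*miss_rule_multi;
	};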
@@ -811,7 +904,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 		esw->fdb_table.offloads.fdb_left[i] =
 			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
 
-	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
+	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
+		esw->total_vports;
 
 	/* create the slow path fdb with encap set, so further table instances
 	 * can be created at run time while VFs are probed if the FW allows that.
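The extra esw->total_vports entries added to table_size here are consumed one-for-one by the peer miss flow group created in the next hunk, whose start/end flow indexes span exactly esw->total_vports entries.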
@@ -866,6 +960,34 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 	}
 	esw->fdb_table.offloads.send_to_vport_grp = g;
 
+	/* create peer esw miss group */
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_MISC_PARAMETERS);
+
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+				      match_criteria);
+
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+			 misc_parameters.source_port);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+			 misc_parameters.source_eswitch_owner_vhca_id);
+
+	MLX5_SET(create_flow_group_in, flow_group_in,
+		 source_eswitch_owner_vhca_id_valid, 1);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+		 ix + esw->total_vports - 1);
+	ix += esw->total_vports;
+
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+		goto peer_miss_err;
+	}
+	esw->fdb_table.offloads.peer_miss_grp = g;
+
 	/* create miss group */
 	memset(flow_group_in, 0, inlen);
 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -898,6 +1020,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 miss_rule_err:
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 miss_err:
+	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+peer_miss_err:
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 send_vport_err:
 	esw_destroy_offloads_fast_fdb_tables(esw);
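The new peer_miss_err label keeps the unwind an exact mirror of creation order: any failure after the peer miss group is created now falls through miss_err to destroy it before releasing the send-to-vport group and the fast-path FDB tables.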
@@ -917,6 +1041,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 
 	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
@@ -1173,6 +1298,99 @@ static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
 	return err;
 }
 
+#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
+#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)
+
+static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
+				  struct mlx5_eswitch *peer_esw)
+{
+	int err;
+
+	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
+{
+	esw_del_fdb_peer_miss_rules(esw);
+}
+
+static int mlx5_esw_offloads_devcom_event(int event,
+					  void *my_data,
+					  void *event_data)
+{
+	struct mlx5_eswitch *esw = my_data;
+	struct mlx5_eswitch *peer_esw = event_data;
+	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+	int err;
+
+	switch (event) {
+	case ESW_OFFLOADS_DEVCOM_PAIR:
+		err = mlx5_esw_offloads_pair(esw, peer_esw);
+		if (err)
+			goto err_out;
+
+		err = mlx5_esw_offloads_pair(peer_esw, esw);
+		if (err)
+			goto err_pair;
+
+		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+		break;
+
+	case ESW_OFFLOADS_DEVCOM_UNPAIR:
+		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+			break;
+
+		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+		mlx5_esw_offloads_unpair(peer_esw);
+		mlx5_esw_offloads_unpair(esw);
+		break;
+	}
+
+	return 0;
+
+err_pair:
+	mlx5_esw_offloads_unpair(esw);
+
+err_out:
+	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
+		      event, err);
+	return err;
+}
+
+static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+{
+	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+
+	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		return;
+
+	mlx5_devcom_register_component(devcom,
+				       MLX5_DEVCOM_ESW_OFFLOADS,
+				       mlx5_esw_offloads_devcom_event,
+				       esw);
+
+	mlx5_devcom_send_event(devcom,
+			       MLX5_DEVCOM_ESW_OFFLOADS,
+			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
+}
+
+static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+{
+	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+
+	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		return;
+
+	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+
+	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+}
+
 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 {
 	int err;
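The pairing flow is event driven: on ESW_OFFLOADS_DEVCOM_PAIR the handler installs miss rules in both directions (each e-switch pointing at its peer) and only then marks the component paired; on UNPAIR it clears the paired state first and then removes both rule sets. The devcom interface itself lives in lib/devcom.h, outside this diff; the sketch below reconstructs the signatures implied by the call sites above (MLX5_DEVCOM_ESW_OFFLOADS is assumed to be an enum mlx5_devcom_components value, and the return types and handler typedef are assumptions, not copied from the header):

	/* Sketch only: assumed lib/devcom.h interface, inferred from the
	 * call sites in this patch; not the verbatim header.
	 */
	typedef int (*mlx5_devcom_event_handler_t)(int event, void *my_data,
						   void *event_data);

	void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
					    enum mlx5_devcom_components id,
					    mlx5_devcom_event_handler_t handler,
					    void *data);
	void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
					      enum mlx5_devcom_components id);
	int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
				   enum mlx5_devcom_components id,
				   int event, void *event_data);
	void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
				    enum mlx5_devcom_components id,
				    bool paired);
	bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
				   enum mlx5_devcom_components id);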
@@ -1195,6 +1413,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		goto err_reps;
 
+	esw_offloads_devcom_init(esw);
 	return 0;
 
 err_reps:
@@ -1233,6 +1452,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 {
+	esw_offloads_devcom_cleanup(esw);
 	esw_offloads_unload_reps(esw, nvports);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
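Init and cleanup stay symmetric: esw_offloads_devcom_init() runs as the last step of esw_offloads_init(), after the representors are loaded, while esw_offloads_devcom_cleanup() runs first here, so the peer miss rules are removed before the representors and offload tables are torn down. Both are no-ops on devices that lack the merged_eswitch capability.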