37
37
#include <linux/mlx5/fs.h>
38
38
#include "mlx5_core.h"
39
39
#include "eswitch.h"
40
+ #include "fs_core.h"
40
41
41
42
#define UPLINK_VPORT 0xFFFF
42
43
@@ -1123,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1123
1124
static int esw_vport_ingress_config (struct mlx5_eswitch * esw ,
1124
1125
struct mlx5_vport * vport )
1125
1126
{
1127
+ struct mlx5_fc * counter = vport -> ingress .drop_counter ;
1128
+ struct mlx5_flow_destination drop_ctr_dst = {0 };
1129
+ struct mlx5_flow_destination * dst = NULL ;
1126
1130
struct mlx5_flow_act flow_act = {0 };
1127
1131
struct mlx5_flow_spec * spec ;
1132
+ int dest_num = 0 ;
1128
1133
int err = 0 ;
1129
1134
u8 * smac_v ;
1130
1135
@@ -1188,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1188
1193
1189
1194
memset (spec , 0 , sizeof (* spec ));
1190
1195
flow_act .action = MLX5_FLOW_CONTEXT_ACTION_DROP ;
1196
+
1197
+ /* Attach drop flow counter */
1198
+ if (counter ) {
1199
+ flow_act .action |= MLX5_FLOW_CONTEXT_ACTION_COUNT ;
1200
+ drop_ctr_dst .type = MLX5_FLOW_DESTINATION_TYPE_COUNTER ;
1201
+ drop_ctr_dst .counter = counter ;
1202
+ dst = & drop_ctr_dst ;
1203
+ dest_num ++ ;
1204
+ }
1191
1205
vport -> ingress .drop_rule =
1192
1206
mlx5_add_flow_rules (vport -> ingress .acl , spec ,
1193
- & flow_act , NULL , 0 );
1207
+ & flow_act , dst , dest_num );
1194
1208
if (IS_ERR (vport -> ingress .drop_rule )) {
1195
1209
err = PTR_ERR (vport -> ingress .drop_rule );
1196
1210
esw_warn (esw -> dev ,
@@ -1210,8 +1224,12 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1210
1224
static int esw_vport_egress_config (struct mlx5_eswitch * esw ,
1211
1225
struct mlx5_vport * vport )
1212
1226
{
1227
+ struct mlx5_fc * counter = vport -> egress .drop_counter ;
1228
+ struct mlx5_flow_destination drop_ctr_dst = {0 };
1229
+ struct mlx5_flow_destination * dst = NULL ;
1213
1230
struct mlx5_flow_act flow_act = {0 };
1214
1231
struct mlx5_flow_spec * spec ;
1232
+ int dest_num = 0 ;
1215
1233
int err = 0 ;
1216
1234
1217
1235
esw_vport_cleanup_egress_rules (esw , vport );
@@ -1262,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1262
1280
/* Drop others rule (star rule) */
1263
1281
memset (spec , 0 , sizeof (* spec ));
1264
1282
flow_act .action = MLX5_FLOW_CONTEXT_ACTION_DROP ;
1283
+
1284
+ /* Attach egress drop flow counter */
1285
+ if (counter ) {
1286
+ flow_act .action |= MLX5_FLOW_CONTEXT_ACTION_COUNT ;
1287
+ drop_ctr_dst .type = MLX5_FLOW_DESTINATION_TYPE_COUNTER ;
1288
+ drop_ctr_dst .counter = counter ;
1289
+ dst = & drop_ctr_dst ;
1290
+ dest_num ++ ;
1291
+ }
1265
1292
vport -> egress .drop_rule =
1266
1293
mlx5_add_flow_rules (vport -> egress .acl , spec ,
1267
- & flow_act , NULL , 0 );
1294
+ & flow_act , dst , dest_num );
1268
1295
if (IS_ERR (vport -> egress .drop_rule )) {
1269
1296
err = PTR_ERR (vport -> egress .drop_rule );
1270
1297
esw_warn (esw -> dev ,
@@ -1457,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
1457
1484
}
1458
1485
}
1459
1486
1487
+ static void esw_vport_create_drop_counters (struct mlx5_vport * vport )
1488
+ {
1489
+ struct mlx5_core_dev * dev = vport -> dev ;
1490
+
1491
+ if (MLX5_CAP_ESW_INGRESS_ACL (dev , flow_counter )) {
1492
+ vport -> ingress .drop_counter = mlx5_fc_create (dev , false);
1493
+ if (IS_ERR (vport -> ingress .drop_counter )) {
1494
+ esw_warn (dev ,
1495
+ "vport[%d] configure ingress drop rule counter failed\n" ,
1496
+ vport -> vport );
1497
+ vport -> ingress .drop_counter = NULL ;
1498
+ }
1499
+ }
1500
+
1501
+ if (MLX5_CAP_ESW_EGRESS_ACL (dev , flow_counter )) {
1502
+ vport -> egress .drop_counter = mlx5_fc_create (dev , false);
1503
+ if (IS_ERR (vport -> egress .drop_counter )) {
1504
+ esw_warn (dev ,
1505
+ "vport[%d] configure egress drop rule counter failed\n" ,
1506
+ vport -> vport );
1507
+ vport -> egress .drop_counter = NULL ;
1508
+ }
1509
+ }
1510
+ }
1511
+
1512
+ static void esw_vport_destroy_drop_counters (struct mlx5_vport * vport )
1513
+ {
1514
+ struct mlx5_core_dev * dev = vport -> dev ;
1515
+
1516
+ if (vport -> ingress .drop_counter )
1517
+ mlx5_fc_destroy (dev , vport -> ingress .drop_counter );
1518
+ if (vport -> egress .drop_counter )
1519
+ mlx5_fc_destroy (dev , vport -> egress .drop_counter );
1520
+ }
1521
+
1460
1522
static void esw_enable_vport (struct mlx5_eswitch * esw , int vport_num ,
1461
1523
int enable_events )
1462
1524
{
@@ -1483,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
1483
1545
if (!vport_num )
1484
1546
vport -> info .trusted = true;
1485
1547
1548
+ /* create steering drop counters for ingress and egress ACLs */
1549
+ if (vport_num && esw -> mode == SRIOV_LEGACY )
1550
+ esw_vport_create_drop_counters (vport );
1551
+
1486
1552
esw_vport_change_handle_locked (vport );
1487
1553
1488
1554
esw -> enabled_vports ++ ;
@@ -1521,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
1521
1587
MLX5_ESW_VPORT_ADMIN_STATE_DOWN );
1522
1588
esw_vport_disable_egress_acl (esw , vport );
1523
1589
esw_vport_disable_ingress_acl (esw , vport );
1590
+ esw_vport_destroy_drop_counters (vport );
1524
1591
}
1525
1592
esw -> enabled_vports -- ;
1526
1593
mutex_unlock (& esw -> state_lock );
@@ -2016,12 +2083,36 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
2016
2083
return err ;
2017
2084
}
2018
2085
2086
/* Read the ACL drop-rule packet counters for @vport_idx into @stats.
 *
 * Only meaningful for enabled vports in legacy SR-IOV mode; otherwise the
 * function returns without touching @stats (caller is expected to have
 * zero-initialized it).
 *
 * NOTE(review): the direction mapping is intentionally crossed — the
 * e-switch *egress* counter feeds stats->rx_dropped and the *ingress*
 * counter feeds stats->tx_dropped. Presumably the ACL direction is named
 * from the e-switch side and is therefore inverted relative to the VF's
 * view of RX/TX — confirm against the ACL setup code before changing.
 */
static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
						int vport_idx,
						struct mlx5_vport_drop_stats *stats)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_vport *vport = &esw->vports[vport_idx];
	u64 bytes = 0;
	u16 idx = 0;

	/* Drop counters are only created for legacy-mode vports. */
	if (!vport->enabled || esw->mode != SRIOV_LEGACY)
		return;

	if (vport->egress.drop_counter) {
		idx = vport->egress.drop_counter->id;
		/* Packet count lands in rx_dropped; byte count is discarded.
		 * mlx5_fc_query's return value is ignored here — on failure
		 * stats presumably keeps its prior (zero) value; verify.
		 */
		mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
	}

	if (vport->ingress.drop_counter) {
		idx = vport->ingress.drop_counter->id;
		mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
	}
}
2108
+
2019
2109
int mlx5_eswitch_get_vport_stats (struct mlx5_eswitch * esw ,
2020
2110
int vport ,
2021
2111
struct ifla_vf_stats * vf_stats )
2022
2112
{
2023
2113
int outlen = MLX5_ST_SZ_BYTES (query_vport_counter_out );
2024
2114
u32 in [MLX5_ST_SZ_DW (query_vport_counter_in )] = {0 };
2115
+ struct mlx5_vport_drop_stats stats = {0 };
2025
2116
int err = 0 ;
2026
2117
u32 * out ;
2027
2118
@@ -2076,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
2076
2167
vf_stats -> broadcast =
2077
2168
MLX5_GET_CTR (out , received_eth_broadcast .packets );
2078
2169
2170
+ mlx5_eswitch_query_vport_drop_stats (esw -> dev , vport , & stats );
2171
+ vf_stats -> rx_dropped = stats .rx_dropped ;
2172
+ vf_stats -> tx_dropped = stats .tx_dropped ;
2173
+
2079
2174
free_out :
2080
2175
kvfree (out );
2081
2176
return err ;
0 commit comments