@@ -53,13 +53,24 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
 				     u32 *match_c, u32 *match_v,
 				     u32 action, u32 flow_tag)
 {
-	struct mlx5_flow_destination dest = {
-		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
-		{.ft = priv->fs.vlan.ft.t},
-	};
+	struct mlx5_core_dev *dev = priv->mdev;
+	struct mlx5_flow_destination dest = { 0 };
+	struct mlx5_fc *counter = NULL;
 	struct mlx5_flow_rule *rule;
 	bool table_created = false;
 
+	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+		dest.ft = priv->fs.vlan.ft.t;
+	} else {
+		counter = mlx5_fc_create(dev, true);
+		if (IS_ERR(counter))
+			return ERR_CAST(counter);
+
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest.counter = counter;
+	}
+
 	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
 		priv->fs.tc.t =
 			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
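The hunk above reworks destination selection: rules that forward (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) keep the VLAN flow table as their destination, while all other rules, i.e. drops, now get a dedicated hardware flow counter as the destination so dropped traffic is still counted. A minimal sketch of the counter-as-destination pattern, for illustration only (the second argument to mlx5_fc_create() enables aging, meaning the core periodically polls and caches the counter values):

	/* Illustrative sketch, not part of the patch: allocate an aging
	 * flow counter and attach it as the rule destination.
	 */
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(dev, true);	/* true = enable aging */
	if (IS_ERR(counter))
		return ERR_CAST(counter);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = counter;
	/* on any later failure, release with mlx5_fc_destroy(dev, counter) */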
@@ -70,7 +81,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
 		if (IS_ERR(priv->fs.tc.t)) {
 			netdev_err(priv->netdev,
 				   "Failed to create tc offload table\n");
-			return ERR_CAST(priv->fs.tc.t);
+			rule = ERR_CAST(priv->fs.tc.t);
+			goto err_create_ft;
 		}
 
 		table_created = true;
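Because a counter may already have been allocated by the time table creation fails, the early return above would leak it; the error pointer is now staged in rule and routed through the common unwind path (err_create_ft, added in the next hunk). For readers unfamiliar with the idiom, a minimal sketch of the <linux/err.h> helpers used here:

	#include <linux/err.h>

	/* An errno is encoded in the pointer itself; ERR_CAST() merely
	 * re-types such an error pointer so it can be returned through
	 * a function with a different return type.
	 */
	void *p = ERR_PTR(-ENOMEM);
	if (IS_ERR(p))
		pr_err("failed: %ld\n", PTR_ERR(p));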
@@ -79,21 +91,35 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
 	rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
 				  match_c, match_v,
 				  action, flow_tag,
-				  action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
+				  &dest);
+
+	if (IS_ERR(rule))
+		goto err_add_rule;
+
+	return rule;
 
-	if (IS_ERR(rule) && table_created) {
+err_add_rule:
+	if (table_created) {
 		mlx5_destroy_flow_table(priv->fs.tc.t);
 		priv->fs.tc.t = NULL;
 	}
+err_create_ft:
+	mlx5_fc_destroy(dev, counter);
 
 	return rule;
 }
 
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5_flow_rule *rule)
 {
+	struct mlx5_fc *counter = NULL;
+
+	counter = mlx5_flow_rule_counter(rule);
+
 	mlx5_del_flow_rule(rule);
 
+	mlx5_fc_destroy(priv->mdev, counter);
+
 	if (!mlx5e_tc_num_filters(priv)) {
 		mlx5_destroy_flow_table(priv->fs.tc.t);
 		priv->fs.tc.t = NULL;
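Two ordering details in the hunk above are worth noting. In the add path, the labels unwind in reverse order of setup, and since forward rules reach err_create_ft with counter == NULL, the code relies on mlx5_fc_destroy() being NULL-safe. A sketch of what that implies (an assumption drawn from this call site; the counter code itself is not part of this patch):

	/* Assumed shape of mlx5_fc_destroy(): a no-op when the rule
	 * never had a counter attached.
	 */
	void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
	{
		if (!counter)
			return;
		/* ... unlink from the aging list, free the firmware counter ... */
	}

In the delete path, the counter must be fetched via mlx5_flow_rule_counter() before mlx5_del_flow_rule() releases the rule, and destroyed only afterwards, once hardware no longer references it.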
@@ -286,6 +312,9 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
 		if (is_tcf_gact_shot(a)) {
 			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+			if (MLX5_CAP_FLOWTABLE(priv->mdev,
+					       flow_table_properties_nic_receive.flow_counter))
+				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			continue;
 		}
 
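With this change, a flower drop action (gact "shot") also requests hardware counting, but only when the device advertises flow-counter support for the NIC receive flow table; on devices without the capability, the drop is still offloaded, just without statistics. A sketch of the action mask such a filter ends up with on a capable device:

	/* Since FWD_DEST is not set, mlx5e_tc_add_flow() takes the
	 * counter-destination branch shown earlier.
	 */
	u32 action = MLX5_FLOW_CONTEXT_ACTION_DROP |
		     MLX5_FLOW_CONTEXT_ACTION_COUNT;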
@@ -394,6 +423,34 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
 	return 0;
 }
 
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+		       struct tc_cls_flower_offload *f)
+{
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
+	struct mlx5e_tc_flow *flow;
+	struct tc_action *a;
+	struct mlx5_fc *counter;
+	u64 bytes;
+	u64 packets;
+	u64 lastuse;
+
+	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+				      tc->ht_params);
+	if (!flow)
+		return -EINVAL;
+
+	counter = mlx5_flow_rule_counter(flow->rule);
+	if (!counter)
+		return 0;
+
+	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+
+	tc_for_each_action(a, f->exts)
+		tcf_action_stats_update(a, bytes, packets, lastuse);
+
+	return 0;
+}
+
 static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
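The new mlx5e_stats_flower() reads the counter values from the core's cache (populated by the aging mechanism enabled at mlx5_fc_create() time, so no firmware round-trip is needed per query) and folds them into the filter's tc actions. A hedged sketch of how the helper is expected to be dispatched from the driver's ndo_setup_tc() flower handler; the corresponding en_main.c hunk is not shown in this excerpt, so the surrounding names simply follow the existing mlx5e_configure_flower()/mlx5e_delete_flower() pattern:

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, proto, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}

Once wired up, tc -s filter show dev <netdev> ingress should report hardware-updated packet and byte counts for offloaded drop rules.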