@@ -2449,6 +2449,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2449
2449
#define LAST_TUNNEL_FIELD tunnel_id
2450
2450
#define LAST_FLOW_TAG_FIELD tag_id
2451
2451
#define LAST_DROP_FIELD size
2452
+ #define LAST_COUNTERS_FIELD counters
2452
2453
2453
2454
/* Field is the last supported field */
2454
2455
#define FIELDS_NOT_SUPPORTED (filter , field )\
@@ -2721,6 +2722,18 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2721
2722
if (ret )
2722
2723
return ret ;
2723
2724
break ;
2725
+ case IB_FLOW_SPEC_ACTION_COUNT :
2726
+ if (FIELDS_NOT_SUPPORTED (ib_spec -> flow_count ,
2727
+ LAST_COUNTERS_FIELD ))
2728
+ return - EOPNOTSUPP ;
2729
+
2730
+ /* for now support only one counters spec per flow */
2731
+ if (action -> action & MLX5_FLOW_CONTEXT_ACTION_COUNT )
2732
+ return - EINVAL ;
2733
+
2734
+ action -> counters = ib_spec -> flow_count .counters ;
2735
+ action -> action |= MLX5_FLOW_CONTEXT_ACTION_COUNT ;
2736
+ break ;
2724
2737
default :
2725
2738
return - EINVAL ;
2726
2739
}
@@ -2868,6 +2881,17 @@ static void put_flow_table(struct mlx5_ib_dev *dev,
2868
2881
}
2869
2882
}
2870
2883
2884
/*
 * counters_clear_description - drop the user-supplied counters layout
 * @counters: counters object whose description array is being released
 *
 * Frees the counters_data array previously installed by
 * counters_set_description() and resets cntrs_max_index to 0 so the
 * object can be re-bound with a fresh description later.
 */
static void counters_clear_description(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	/* serialize against concurrent set/clear of the description */
	mutex_lock(&mcounters->mcntrs_mutex);
	kfree(mcounters->counters_data);
	mcounters->counters_data = NULL;
	mcounters->cntrs_max_index = 0;
	mutex_unlock(&mcounters->mcntrs_mutex);
}
2894
+
2871
2895
static int mlx5_ib_destroy_flow (struct ib_flow * flow_id )
2872
2896
{
2873
2897
struct mlx5_ib_dev * dev = to_mdev (flow_id -> qp -> device );
@@ -2887,8 +2911,11 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2887
2911
2888
2912
mlx5_del_flow_rules (handler -> rule );
2889
2913
put_flow_table (dev , handler -> prio , true);
2890
- mutex_unlock (& dev -> flow_db -> lock );
2914
+ if (handler -> ibcounters &&
2915
+ atomic_read (& handler -> ibcounters -> usecnt ) == 1 )
2916
+ counters_clear_description (handler -> ibcounters );
2891
2917
2918
+ mutex_unlock (& dev -> flow_db -> lock );
2892
2919
kfree (handler );
2893
2920
2894
2921
return 0 ;
@@ -3008,21 +3035,127 @@ static void set_underlay_qp(struct mlx5_ib_dev *dev,
3008
3035
}
3009
3036
}
3010
3037
3038
+ static int counters_set_description (struct ib_counters * counters ,
3039
+ enum mlx5_ib_counters_type counters_type ,
3040
+ struct mlx5_ib_flow_counters_desc * desc_data ,
3041
+ u32 ncounters )
3042
+ {
3043
+ struct mlx5_ib_mcounters * mcounters = to_mcounters (counters );
3044
+ u32 cntrs_max_index = 0 ;
3045
+ int i ;
3046
+
3047
+ if (counters_type != MLX5_IB_COUNTERS_FLOW )
3048
+ return - EINVAL ;
3049
+
3050
+ /* init the fields for the object */
3051
+ mcounters -> type = counters_type ;
3052
+ mcounters -> ncounters = ncounters ;
3053
+ /* each counter entry have both description and index pair */
3054
+ for (i = 0 ; i < ncounters ; i ++ ) {
3055
+ if (desc_data [i ].description > IB_COUNTER_BYTES )
3056
+ return - EINVAL ;
3057
+
3058
+ if (cntrs_max_index <= desc_data [i ].index )
3059
+ cntrs_max_index = desc_data [i ].index + 1 ;
3060
+ }
3061
+
3062
+ mutex_lock (& mcounters -> mcntrs_mutex );
3063
+ mcounters -> counters_data = desc_data ;
3064
+ mcounters -> cntrs_max_index = cntrs_max_index ;
3065
+ mutex_unlock (& mcounters -> mcntrs_mutex );
3066
+
3067
+ return 0 ;
3068
+ }
3069
+
3070
+ #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
3071
+ static int flow_counters_set_data (struct ib_counters * ibcounters ,
3072
+ struct mlx5_ib_create_flow * ucmd )
3073
+ {
3074
+ struct mlx5_ib_mcounters * mcounters = to_mcounters (ibcounters );
3075
+ struct mlx5_ib_flow_counters_data * cntrs_data = NULL ;
3076
+ struct mlx5_ib_flow_counters_desc * desc_data = NULL ;
3077
+ bool hw_hndl = false;
3078
+ int ret = 0 ;
3079
+
3080
+ if (ucmd && ucmd -> ncounters_data != 0 ) {
3081
+ cntrs_data = ucmd -> data ;
3082
+ if (cntrs_data -> ncounters > MAX_COUNTERS_NUM )
3083
+ return - EINVAL ;
3084
+
3085
+ desc_data = kcalloc (cntrs_data -> ncounters ,
3086
+ sizeof (* desc_data ),
3087
+ GFP_KERNEL );
3088
+ if (!desc_data )
3089
+ return - ENOMEM ;
3090
+
3091
+ if (copy_from_user (desc_data ,
3092
+ u64_to_user_ptr (cntrs_data -> counters_data ),
3093
+ sizeof (* desc_data ) * cntrs_data -> ncounters )) {
3094
+ ret = - EFAULT ;
3095
+ goto free ;
3096
+ }
3097
+ }
3098
+
3099
+ if (!mcounters -> hw_cntrs_hndl ) {
3100
+ mcounters -> hw_cntrs_hndl = mlx5_fc_create (
3101
+ to_mdev (ibcounters -> device )-> mdev , false);
3102
+ if (!mcounters -> hw_cntrs_hndl ) {
3103
+ ret = - ENOMEM ;
3104
+ goto free ;
3105
+ }
3106
+ hw_hndl = true;
3107
+ }
3108
+
3109
+ if (desc_data ) {
3110
+ /* counters already bound to at least one flow */
3111
+ if (mcounters -> cntrs_max_index ) {
3112
+ ret = - EINVAL ;
3113
+ goto free_hndl ;
3114
+ }
3115
+
3116
+ ret = counters_set_description (ibcounters ,
3117
+ MLX5_IB_COUNTERS_FLOW ,
3118
+ desc_data ,
3119
+ cntrs_data -> ncounters );
3120
+ if (ret )
3121
+ goto free_hndl ;
3122
+
3123
+ } else if (!mcounters -> cntrs_max_index ) {
3124
+ /* counters not bound yet, must have udata passed */
3125
+ ret = - EINVAL ;
3126
+ goto free_hndl ;
3127
+ }
3128
+
3129
+ return 0 ;
3130
+
3131
+ free_hndl :
3132
+ if (hw_hndl ) {
3133
+ mlx5_fc_destroy (to_mdev (ibcounters -> device )-> mdev ,
3134
+ mcounters -> hw_cntrs_hndl );
3135
+ mcounters -> hw_cntrs_hndl = NULL ;
3136
+ }
3137
+ free :
3138
+ kfree (desc_data );
3139
+ return ret ;
3140
+ }
3141
+
3011
3142
static struct mlx5_ib_flow_handler * _create_flow_rule (struct mlx5_ib_dev * dev ,
3012
3143
struct mlx5_ib_flow_prio * ft_prio ,
3013
3144
const struct ib_flow_attr * flow_attr ,
3014
3145
struct mlx5_flow_destination * dst ,
3015
- u32 underlay_qpn )
3146
+ u32 underlay_qpn ,
3147
+ struct mlx5_ib_create_flow * ucmd )
3016
3148
{
3017
3149
struct mlx5_flow_table * ft = ft_prio -> flow_table ;
3018
3150
struct mlx5_ib_flow_handler * handler ;
3019
3151
struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG };
3020
3152
struct mlx5_flow_spec * spec ;
3021
- struct mlx5_flow_destination * rule_dst = dst ;
3153
+ struct mlx5_flow_destination dest_arr [2 ] = {};
3154
+ struct mlx5_flow_destination * rule_dst = dest_arr ;
3022
3155
const void * ib_flow = (const void * )flow_attr + sizeof (* flow_attr );
3023
3156
unsigned int spec_index ;
3024
3157
int err = 0 ;
3025
- int dest_num = 1 ;
3158
+ int dest_num = 0 ;
3026
3159
bool is_egress = flow_attr -> flags & IB_FLOW_ATTR_FLAGS_EGRESS ;
3027
3160
3028
3161
if (!is_valid_attr (dev -> mdev , flow_attr ))
@@ -3036,6 +3169,10 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3036
3169
}
3037
3170
3038
3171
INIT_LIST_HEAD (& handler -> list );
3172
+ if (dst ) {
3173
+ memcpy (& dest_arr [0 ], dst , sizeof (* dst ));
3174
+ dest_num ++ ;
3175
+ }
3039
3176
3040
3177
for (spec_index = 0 ; spec_index < flow_attr -> num_of_specs ; spec_index ++ ) {
3041
3178
err = parse_flow_attr (dev -> mdev , spec -> match_criteria ,
@@ -3070,15 +3207,30 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3070
3207
goto free ;
3071
3208
}
3072
3209
3210
+ if (flow_act .action & MLX5_FLOW_CONTEXT_ACTION_COUNT ) {
3211
+ err = flow_counters_set_data (flow_act .counters , ucmd );
3212
+ if (err )
3213
+ goto free ;
3214
+
3215
+ handler -> ibcounters = flow_act .counters ;
3216
+ dest_arr [dest_num ].type =
3217
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER ;
3218
+ dest_arr [dest_num ].counter =
3219
+ to_mcounters (flow_act .counters )-> hw_cntrs_hndl ;
3220
+ dest_num ++ ;
3221
+ }
3222
+
3073
3223
if (flow_act .action & MLX5_FLOW_CONTEXT_ACTION_DROP ) {
3074
- rule_dst = NULL ;
3075
- dest_num = 0 ;
3224
+ if (!(flow_act .action & MLX5_FLOW_CONTEXT_ACTION_COUNT )) {
3225
+ rule_dst = NULL ;
3226
+ dest_num = 0 ;
3227
+ }
3076
3228
} else {
3077
3229
if (is_egress )
3078
3230
flow_act .action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW ;
3079
3231
else
3080
3232
flow_act .action |=
3081
- dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3233
+ dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3082
3234
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO ;
3083
3235
}
3084
3236
@@ -3104,8 +3256,12 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3104
3256
3105
3257
ft_prio -> flow_table = ft ;
3106
3258
free :
3107
- if (err )
3259
+ if (err && handler ) {
3260
+ if (handler -> ibcounters &&
3261
+ atomic_read (& handler -> ibcounters -> usecnt ) == 1 )
3262
+ counters_clear_description (handler -> ibcounters );
3108
3263
kfree (handler );
3264
+ }
3109
3265
kvfree (spec );
3110
3266
return err ? ERR_PTR (err ) : handler ;
3111
3267
}
@@ -3115,7 +3271,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3115
3271
const struct ib_flow_attr * flow_attr ,
3116
3272
struct mlx5_flow_destination * dst )
3117
3273
{
3118
- return _create_flow_rule (dev , ft_prio , flow_attr , dst , 0 );
3274
+ return _create_flow_rule (dev , ft_prio , flow_attr , dst , 0 , NULL );
3119
3275
}
3120
3276
3121
3277
static struct mlx5_ib_flow_handler * create_dont_trap_rule (struct mlx5_ib_dev * dev ,
@@ -3255,12 +3411,43 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3255
3411
struct mlx5_ib_flow_prio * ft_prio_tx = NULL ;
3256
3412
struct mlx5_ib_flow_prio * ft_prio ;
3257
3413
bool is_egress = flow_attr -> flags & IB_FLOW_ATTR_FLAGS_EGRESS ;
3414
+ struct mlx5_ib_create_flow * ucmd = NULL , ucmd_hdr ;
3415
+ size_t min_ucmd_sz , required_ucmd_sz ;
3258
3416
int err ;
3259
3417
int underlay_qpn ;
3260
3418
3261
- if (udata &&
3262
- udata -> inlen && !ib_is_udata_cleared (udata , 0 , udata -> inlen ))
3263
- return ERR_PTR (- EOPNOTSUPP );
3419
+ if (udata && udata -> inlen ) {
3420
+ min_ucmd_sz = offsetof(typeof (ucmd_hdr ), reserved ) +
3421
+ sizeof (ucmd_hdr .reserved );
3422
+ if (udata -> inlen < min_ucmd_sz )
3423
+ return ERR_PTR (- EOPNOTSUPP );
3424
+
3425
+ err = ib_copy_from_udata (& ucmd_hdr , udata , min_ucmd_sz );
3426
+ if (err )
3427
+ return ERR_PTR (err );
3428
+
3429
+ /* currently supports only one counters data */
3430
+ if (ucmd_hdr .ncounters_data > 1 )
3431
+ return ERR_PTR (- EINVAL );
3432
+
3433
+ required_ucmd_sz = min_ucmd_sz +
3434
+ sizeof (struct mlx5_ib_flow_counters_data ) *
3435
+ ucmd_hdr .ncounters_data ;
3436
+ if (udata -> inlen > required_ucmd_sz &&
3437
+ !ib_is_udata_cleared (udata , required_ucmd_sz ,
3438
+ udata -> inlen - required_ucmd_sz ))
3439
+ return ERR_PTR (- EOPNOTSUPP );
3440
+
3441
+ ucmd = kzalloc (required_ucmd_sz , GFP_KERNEL );
3442
+ if (!ucmd )
3443
+ return ERR_PTR (- ENOMEM );
3444
+
3445
+ err = ib_copy_from_udata (ucmd , udata , required_ucmd_sz );
3446
+ if (err ) {
3447
+ kfree (ucmd );
3448
+ return ERR_PTR (err );
3449
+ }
3450
+ }
3264
3451
3265
3452
if (flow_attr -> priority > MLX5_IB_FLOW_LAST_PRIO )
3266
3453
return ERR_PTR (- ENOMEM );
@@ -3315,7 +3502,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3315
3502
underlay_qpn = (mqp -> flags & MLX5_IB_QP_UNDERLAY ) ?
3316
3503
mqp -> underlay_qpn : 0 ;
3317
3504
handler = _create_flow_rule (dev , ft_prio , flow_attr ,
3318
- dst , underlay_qpn );
3505
+ dst , underlay_qpn , ucmd );
3319
3506
}
3320
3507
} else if (flow_attr -> type == IB_FLOW_ATTR_ALL_DEFAULT ||
3321
3508
flow_attr -> type == IB_FLOW_ATTR_MC_DEFAULT ) {
@@ -3336,6 +3523,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3336
3523
3337
3524
mutex_unlock (& dev -> flow_db -> lock );
3338
3525
kfree (dst );
3526
+ kfree (ucmd );
3339
3527
3340
3528
return & handler -> ibflow ;
3341
3529
@@ -3346,6 +3534,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3346
3534
unlock :
3347
3535
mutex_unlock (& dev -> flow_db -> lock );
3348
3536
kfree (dst );
3537
+ kfree (ucmd );
3349
3538
kfree (handler );
3350
3539
return ERR_PTR (err );
3351
3540
}
@@ -5010,6 +5199,11 @@ static int mlx5_ib_destroy_counters(struct ib_counters *counters)
5010
5199
{
5011
5200
struct mlx5_ib_mcounters * mcounters = to_mcounters (counters );
5012
5201
5202
+ counters_clear_description (counters );
5203
+ if (mcounters -> hw_cntrs_hndl )
5204
+ mlx5_fc_destroy (to_mdev (counters -> device )-> mdev ,
5205
+ mcounters -> hw_cntrs_hndl );
5206
+
5013
5207
kfree (mcounters );
5014
5208
5015
5209
return 0 ;
@@ -5024,6 +5218,8 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
5024
5218
if (!mcounters )
5025
5219
return ERR_PTR (- ENOMEM );
5026
5220
5221
+ mutex_init (& mcounters -> mcntrs_mutex );
5222
+
5027
5223
return & mcounters -> ibcntrs ;
5028
5224
}
5029
5225
0 commit comments