@@ -70,10 +70,19 @@ static char mlx5_version[] =
 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
 	DRIVER_VERSION "\n";
 
+struct mlx5_ib_event_work {
+	struct work_struct	work;
+	struct mlx5_core_dev	*dev;
+	void			*context;
+	enum mlx5_dev_event	event;
+	unsigned long		param;
+};
+
 enum {
 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
 };
 
+static struct workqueue_struct *mlx5_ib_event_wq;
 static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
 static LIST_HEAD(mlx5_ib_dev_list);
 /*
@@ -3132,15 +3141,24 @@ static void delay_drop_handler(struct work_struct *work)
 	mutex_unlock(&delay_drop->lock);
 }
 
-static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
-			  enum mlx5_dev_event event, unsigned long param)
+static void mlx5_ib_handle_event(struct work_struct *_work)
 {
-	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
+	struct mlx5_ib_event_work *work =
+		container_of(_work, struct mlx5_ib_event_work, work);
+	struct mlx5_ib_dev *ibdev;
 	struct ib_event ibev;
 	bool fatal = false;
 	u8 port = 0;
 
-	switch (event) {
+	if (mlx5_core_is_mp_slave(work->dev)) {
+		ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
+		if (!ibdev)
+			goto out;
+	} else {
+		ibdev = work->context;
+	}
+
+	switch (work->event) {
 	case MLX5_DEV_EVENT_SYS_ERROR:
 		ibev.event = IB_EVENT_DEVICE_FATAL;
 		mlx5_ib_handle_internal_error(ibdev);
@@ -3150,39 +3168,39 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		port = (u8)param;
+		port = (u8)work->param;
 
 		/* In RoCE, port up/down events are handled in
 		 * mlx5_netdev_event().
 		 */
 		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
 		    IB_LINK_LAYER_ETHERNET)
-			return;
+			goto out;
 
-		ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+		ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
 			     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
 		break;
 
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
-		port = (u8)param;
+		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_PKEY_CHANGE:
 		ibev.event = IB_EVENT_PKEY_CHANGE;
-		port = (u8)param;
+		port = (u8)work->param;
 
 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
 		break;
 
 	case MLX5_DEV_EVENT_GUID_CHANGE:
 		ibev.event = IB_EVENT_GID_CHANGE;
-		port = (u8)param;
+		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_CLIENT_REREG:
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
-		port = (u8)param;
+		port = (u8)work->param;
 		break;
 	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
 		schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3204,9 +3222,29 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
 	if (fatal)
 		ibdev->ib_active = false;
-
 out:
-	return;
+	kfree(work);
+}
+
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+			  enum mlx5_dev_event event, unsigned long param)
+{
+	struct mlx5_ib_event_work *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(&work->work, mlx5_ib_handle_event);
+		work->dev = dev;
+		work->param = param;
+		work->context = context;
+		work->event = event;
+
+		queue_work(mlx5_ib_event_wq, &work->work);
+		return;
+	}
+
+	dev_warn(&dev->pdev->dev, "%s: mlx5_dev_event: %d, with param: %lu dropped, couldn't allocate memory.\n",
+		 __func__, event, param);
 }
 
 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
@@ -4917,6 +4955,10 @@ static int __init mlx5_ib_init(void)
 {
 	int err;
 
+	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
+	if (!mlx5_ib_event_wq)
+		return -ENOMEM;
+
 	mlx5_ib_odp_init();
 
 	err = mlx5_register_interface(&mlx5_ib_interface);
@@ -4927,6 +4969,7 @@ static int __init mlx5_ib_init(void)
 static void __exit mlx5_ib_cleanup(void)
 {
 	mlx5_unregister_interface(&mlx5_ib_interface);
+	destroy_workqueue(mlx5_ib_event_wq);
 }
 
 module_init(mlx5_ib_init);
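
For reference, here is a minimal self-contained sketch of the pattern this patch applies: an event callback that may run in atomic context captures the event into a heap-allocated work item (hence GFP_ATOMIC) and defers the real handling to an ordered workqueue, which runs items one at a time in submission order. All demo_* names below are hypothetical and are not part of the mlx5 driver.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_event_work {
	struct work_struct work;
	int event;			/* payload captured at enqueue time */
};

static struct workqueue_struct *demo_event_wq;

static void demo_handle_event(struct work_struct *_work)
{
	struct demo_event_work *work =
		container_of(_work, struct demo_event_work, work);

	/* Process context: sleeping locks and blocking calls are allowed. */
	pr_info("demo: handling event %d\n", work->event);
	kfree(work);
}

/* May be called from atomic (e.g. IRQ) context, so it must not sleep. */
static void demo_event(int event)
{
	struct demo_event_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		pr_warn("demo: event %d dropped, couldn't allocate memory\n",
			event);
		return;
	}

	INIT_WORK(&work->work, demo_handle_event);
	work->event = event;
	queue_work(demo_event_wq, &work->work);
}

static int __init demo_init(void)
{
	/* Ordered: at most one in-flight item, FIFO across all items. */
	demo_event_wq = alloc_ordered_workqueue("demo_event_wq", 0);
	if (!demo_event_wq)
		return -ENOMEM;

	demo_event(42);		/* example enqueue */
	return 0;
}

static void __exit demo_exit(void)
{
	/* destroy_workqueue() drains pending work before freeing it. */
	destroy_workqueue(demo_event_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The ordered queue mirrors mlx5_ib_event_wq above: consumers should observe hardware events in the order the device raised them, which a regular concurrent workqueue would not guarantee.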