@@ -57,6 +57,7 @@ static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
57
57
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
/* Lock class for work queue (WQ) uobjects created by the extended verbs. */
static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
60
61
61
62
/*
62
63
* The ib_uobject locking scheme is as follows:
@@ -243,6 +244,16 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
243
244
return idr_read_obj (& ib_uverbs_qp_idr , qp_handle , context , 0 );
244
245
}
245
246
247
+ static struct ib_wq * idr_read_wq (int wq_handle , struct ib_ucontext * context )
248
+ {
249
+ return idr_read_obj (& ib_uverbs_wq_idr , wq_handle , context , 0 );
250
+ }
251
+
252
+ static void put_wq_read (struct ib_wq * wq )
253
+ {
254
+ put_uobj_read (wq -> uobject );
255
+ }
256
+
246
257
static struct ib_qp * idr_write_qp (int qp_handle , struct ib_ucontext * context )
247
258
{
248
259
struct ib_uobject * uobj ;
@@ -326,6 +337,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
326
337
INIT_LIST_HEAD (& ucontext -> qp_list );
327
338
INIT_LIST_HEAD (& ucontext -> srq_list );
328
339
INIT_LIST_HEAD (& ucontext -> ah_list );
340
+ INIT_LIST_HEAD (& ucontext -> wq_list );
329
341
INIT_LIST_HEAD (& ucontext -> xrcd_list );
330
342
INIT_LIST_HEAD (& ucontext -> rule_list );
331
343
rcu_read_lock ();
@@ -3056,6 +3068,237 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
3056
3068
return 0 ;
3057
3069
}
3058
3070
3071
+ int ib_uverbs_ex_create_wq (struct ib_uverbs_file * file ,
3072
+ struct ib_device * ib_dev ,
3073
+ struct ib_udata * ucore ,
3074
+ struct ib_udata * uhw )
3075
+ {
3076
+ struct ib_uverbs_ex_create_wq cmd = {};
3077
+ struct ib_uverbs_ex_create_wq_resp resp = {};
3078
+ struct ib_uwq_object * obj ;
3079
+ int err = 0 ;
3080
+ struct ib_cq * cq ;
3081
+ struct ib_pd * pd ;
3082
+ struct ib_wq * wq ;
3083
+ struct ib_wq_init_attr wq_init_attr = {};
3084
+ size_t required_cmd_sz ;
3085
+ size_t required_resp_len ;
3086
+
3087
+ required_cmd_sz = offsetof(typeof (cmd ), max_sge ) + sizeof (cmd .max_sge );
3088
+ required_resp_len = offsetof(typeof (resp ), wqn ) + sizeof (resp .wqn );
3089
+
3090
+ if (ucore -> inlen < required_cmd_sz )
3091
+ return - EINVAL ;
3092
+
3093
+ if (ucore -> outlen < required_resp_len )
3094
+ return - ENOSPC ;
3095
+
3096
+ if (ucore -> inlen > sizeof (cmd ) &&
3097
+ !ib_is_udata_cleared (ucore , sizeof (cmd ),
3098
+ ucore -> inlen - sizeof (cmd )))
3099
+ return - EOPNOTSUPP ;
3100
+
3101
+ err = ib_copy_from_udata (& cmd , ucore , min (sizeof (cmd ), ucore -> inlen ));
3102
+ if (err )
3103
+ return err ;
3104
+
3105
+ if (cmd .comp_mask )
3106
+ return - EOPNOTSUPP ;
3107
+
3108
+ obj = kmalloc (sizeof (* obj ), GFP_KERNEL );
3109
+ if (!obj )
3110
+ return - ENOMEM ;
3111
+
3112
+ init_uobj (& obj -> uevent .uobject , cmd .user_handle , file -> ucontext ,
3113
+ & wq_lock_class );
3114
+ down_write (& obj -> uevent .uobject .mutex );
3115
+ pd = idr_read_pd (cmd .pd_handle , file -> ucontext );
3116
+ if (!pd ) {
3117
+ err = - EINVAL ;
3118
+ goto err_uobj ;
3119
+ }
3120
+
3121
+ cq = idr_read_cq (cmd .cq_handle , file -> ucontext , 0 );
3122
+ if (!cq ) {
3123
+ err = - EINVAL ;
3124
+ goto err_put_pd ;
3125
+ }
3126
+
3127
+ wq_init_attr .cq = cq ;
3128
+ wq_init_attr .max_sge = cmd .max_sge ;
3129
+ wq_init_attr .max_wr = cmd .max_wr ;
3130
+ wq_init_attr .wq_context = file ;
3131
+ wq_init_attr .wq_type = cmd .wq_type ;
3132
+ wq_init_attr .event_handler = ib_uverbs_wq_event_handler ;
3133
+ obj -> uevent .events_reported = 0 ;
3134
+ INIT_LIST_HEAD (& obj -> uevent .event_list );
3135
+ wq = pd -> device -> create_wq (pd , & wq_init_attr , uhw );
3136
+ if (IS_ERR (wq )) {
3137
+ err = PTR_ERR (wq );
3138
+ goto err_put_cq ;
3139
+ }
3140
+
3141
+ wq -> uobject = & obj -> uevent .uobject ;
3142
+ obj -> uevent .uobject .object = wq ;
3143
+ wq -> wq_type = wq_init_attr .wq_type ;
3144
+ wq -> cq = cq ;
3145
+ wq -> pd = pd ;
3146
+ wq -> device = pd -> device ;
3147
+ wq -> wq_context = wq_init_attr .wq_context ;
3148
+ atomic_set (& wq -> usecnt , 0 );
3149
+ atomic_inc (& pd -> usecnt );
3150
+ atomic_inc (& cq -> usecnt );
3151
+ wq -> uobject = & obj -> uevent .uobject ;
3152
+ obj -> uevent .uobject .object = wq ;
3153
+ err = idr_add_uobj (& ib_uverbs_wq_idr , & obj -> uevent .uobject );
3154
+ if (err )
3155
+ goto destroy_wq ;
3156
+
3157
+ memset (& resp , 0 , sizeof (resp ));
3158
+ resp .wq_handle = obj -> uevent .uobject .id ;
3159
+ resp .max_sge = wq_init_attr .max_sge ;
3160
+ resp .max_wr = wq_init_attr .max_wr ;
3161
+ resp .wqn = wq -> wq_num ;
3162
+ resp .response_length = required_resp_len ;
3163
+ err = ib_copy_to_udata (ucore ,
3164
+ & resp , resp .response_length );
3165
+ if (err )
3166
+ goto err_copy ;
3167
+
3168
+ put_pd_read (pd );
3169
+ put_cq_read (cq );
3170
+
3171
+ mutex_lock (& file -> mutex );
3172
+ list_add_tail (& obj -> uevent .uobject .list , & file -> ucontext -> wq_list );
3173
+ mutex_unlock (& file -> mutex );
3174
+
3175
+ obj -> uevent .uobject .live = 1 ;
3176
+ up_write (& obj -> uevent .uobject .mutex );
3177
+ return 0 ;
3178
+
3179
+ err_copy :
3180
+ idr_remove_uobj (& ib_uverbs_wq_idr , & obj -> uevent .uobject );
3181
+ destroy_wq :
3182
+ ib_destroy_wq (wq );
3183
+ err_put_cq :
3184
+ put_cq_read (cq );
3185
+ err_put_pd :
3186
+ put_pd_read (pd );
3187
+ err_uobj :
3188
+ put_uobj_write (& obj -> uevent .uobject );
3189
+
3190
+ return err ;
3191
+ }
3192
+
3193
+ int ib_uverbs_ex_destroy_wq (struct ib_uverbs_file * file ,
3194
+ struct ib_device * ib_dev ,
3195
+ struct ib_udata * ucore ,
3196
+ struct ib_udata * uhw )
3197
+ {
3198
+ struct ib_uverbs_ex_destroy_wq cmd = {};
3199
+ struct ib_uverbs_ex_destroy_wq_resp resp = {};
3200
+ struct ib_wq * wq ;
3201
+ struct ib_uobject * uobj ;
3202
+ struct ib_uwq_object * obj ;
3203
+ size_t required_cmd_sz ;
3204
+ size_t required_resp_len ;
3205
+ int ret ;
3206
+
3207
+ required_cmd_sz = offsetof(typeof (cmd ), wq_handle ) + sizeof (cmd .wq_handle );
3208
+ required_resp_len = offsetof(typeof (resp ), reserved ) + sizeof (resp .reserved );
3209
+
3210
+ if (ucore -> inlen < required_cmd_sz )
3211
+ return - EINVAL ;
3212
+
3213
+ if (ucore -> outlen < required_resp_len )
3214
+ return - ENOSPC ;
3215
+
3216
+ if (ucore -> inlen > sizeof (cmd ) &&
3217
+ !ib_is_udata_cleared (ucore , sizeof (cmd ),
3218
+ ucore -> inlen - sizeof (cmd )))
3219
+ return - EOPNOTSUPP ;
3220
+
3221
+ ret = ib_copy_from_udata (& cmd , ucore , min (sizeof (cmd ), ucore -> inlen ));
3222
+ if (ret )
3223
+ return ret ;
3224
+
3225
+ if (cmd .comp_mask )
3226
+ return - EOPNOTSUPP ;
3227
+
3228
+ resp .response_length = required_resp_len ;
3229
+ uobj = idr_write_uobj (& ib_uverbs_wq_idr , cmd .wq_handle ,
3230
+ file -> ucontext );
3231
+ if (!uobj )
3232
+ return - EINVAL ;
3233
+
3234
+ wq = uobj -> object ;
3235
+ obj = container_of (uobj , struct ib_uwq_object , uevent .uobject );
3236
+ ret = ib_destroy_wq (wq );
3237
+ if (!ret )
3238
+ uobj -> live = 0 ;
3239
+
3240
+ put_uobj_write (uobj );
3241
+ if (ret )
3242
+ return ret ;
3243
+
3244
+ idr_remove_uobj (& ib_uverbs_wq_idr , uobj );
3245
+
3246
+ mutex_lock (& file -> mutex );
3247
+ list_del (& uobj -> list );
3248
+ mutex_unlock (& file -> mutex );
3249
+
3250
+ ib_uverbs_release_uevent (file , & obj -> uevent );
3251
+ resp .events_reported = obj -> uevent .events_reported ;
3252
+ put_uobj (uobj );
3253
+
3254
+ ret = ib_copy_to_udata (ucore , & resp , resp .response_length );
3255
+ if (ret )
3256
+ return ret ;
3257
+
3258
+ return 0 ;
3259
+ }
3260
+
3261
+ int ib_uverbs_ex_modify_wq (struct ib_uverbs_file * file ,
3262
+ struct ib_device * ib_dev ,
3263
+ struct ib_udata * ucore ,
3264
+ struct ib_udata * uhw )
3265
+ {
3266
+ struct ib_uverbs_ex_modify_wq cmd = {};
3267
+ struct ib_wq * wq ;
3268
+ struct ib_wq_attr wq_attr = {};
3269
+ size_t required_cmd_sz ;
3270
+ int ret ;
3271
+
3272
+ required_cmd_sz = offsetof(typeof (cmd ), curr_wq_state ) + sizeof (cmd .curr_wq_state );
3273
+ if (ucore -> inlen < required_cmd_sz )
3274
+ return - EINVAL ;
3275
+
3276
+ if (ucore -> inlen > sizeof (cmd ) &&
3277
+ !ib_is_udata_cleared (ucore , sizeof (cmd ),
3278
+ ucore -> inlen - sizeof (cmd )))
3279
+ return - EOPNOTSUPP ;
3280
+
3281
+ ret = ib_copy_from_udata (& cmd , ucore , min (sizeof (cmd ), ucore -> inlen ));
3282
+ if (ret )
3283
+ return ret ;
3284
+
3285
+ if (!cmd .attr_mask )
3286
+ return - EINVAL ;
3287
+
3288
+ if (cmd .attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE ))
3289
+ return - EINVAL ;
3290
+
3291
+ wq = idr_read_wq (cmd .wq_handle , file -> ucontext );
3292
+ if (!wq )
3293
+ return - EINVAL ;
3294
+
3295
+ wq_attr .curr_wq_state = cmd .curr_wq_state ;
3296
+ wq_attr .wq_state = cmd .wq_state ;
3297
+ ret = wq -> device -> modify_wq (wq , & wq_attr , cmd .attr_mask , uhw );
3298
+ put_wq_read (wq );
3299
+ return ret ;
3300
+ }
3301
+
3059
3302
int ib_uverbs_ex_create_flow (struct ib_uverbs_file * file ,
3060
3303
struct ib_device * ib_dev ,
3061
3304
struct ib_udata * ucore ,
0 commit comments