@@ -58,6 +58,7 @@ static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
 static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
+static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" };
 
 /*
  * The ib_uobject locking scheme is as follows:
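
The new lock class is what init_uobj() in the handler below uses to annotate indirection-table uobjects for lockdep. It pairs with a per-type IDR, ib_uverbs_rwq_ind_tbl_idr, which the handlers reference through idr_add_uobj()/idr_remove_uobj(); that IDR is not part of this hunk and is presumably defined next to the other uverbs IDRs, along the lines of this sketch (an assumption, not shown in this diff):

    /* Assumed counterpart in uverbs.h / uverbs_main.c, not part of this hunk. */
    extern struct idr ib_uverbs_rwq_ind_tbl_idr;    /* declaration, uverbs.h      */
    DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr);          /* definition, uverbs_main.c  */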
@@ -338,6 +339,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&ucontext->srq_list);
 	INIT_LIST_HEAD(&ucontext->ah_list);
 	INIT_LIST_HEAD(&ucontext->wq_list);
+	INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list);
 	INIT_LIST_HEAD(&ucontext->xrcd_list);
 	INIT_LIST_HEAD(&ucontext->rule_list);
 	rcu_read_lock();
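
Initialising ucontext->rwq_ind_tbl_list assumes struct ib_ucontext has gained a matching list head in ib_verbs.h as part of the same series (not shown in this diff); roughly:

    /* Assumed ib_verbs.h counterpart, next to the existing per-type lists: */
    struct list_head	rwq_ind_tbl_list;

Objects are linked onto this list by the create handler below, unlinked again in ib_uverbs_ex_destroy_rwq_ind_table(), and the ucontext teardown path presumably also has to walk it for cleanup, like the other object lists.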
@@ -3299,6 +3301,214 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
 	return ret;
 }
 
+int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
+				      struct ib_device *ib_dev,
+				      struct ib_udata *ucore,
+				      struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
+	struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
+	struct ib_uobject *uobj;
+	int err = 0;
+	struct ib_rwq_ind_table_init_attr init_attr = {};
+	struct ib_rwq_ind_table *rwq_ind_tbl;
+	struct ib_wq **wqs = NULL;
+	u32 *wqs_handles = NULL;
+	struct ib_wq *wq = NULL;
+	int i, j, num_read_wqs;
+	u32 num_wq_handles;
+	u32 expected_in_size;
+	size_t required_cmd_sz_header;
+	size_t required_resp_len;
+
+	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
+	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
+
+	if (ucore->inlen < required_cmd_sz_header)
+		return -EINVAL;
+
+	if (ucore->outlen < required_resp_len)
+		return -ENOSPC;
+
+	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
+	if (err)
+		return err;
+
+	ucore->inbuf += required_cmd_sz_header;
+	ucore->inlen -= required_cmd_sz_header;
+
+	if (cmd.comp_mask)
+		return -EOPNOTSUPP;
+
+	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
+		return -EINVAL;
+
+	num_wq_handles = 1 << cmd.log_ind_tbl_size;
+	expected_in_size = num_wq_handles * sizeof(__u32);
+	if (num_wq_handles == 1)
+		/* input size for wq handles is u64 aligned */
+		expected_in_size += sizeof(__u32);
+
+	if (ucore->inlen < expected_in_size)
+		return -EINVAL;
+
+	if (ucore->inlen > expected_in_size &&
+	    !ib_is_udata_cleared(ucore, expected_in_size,
+				 ucore->inlen - expected_in_size))
+		return -EOPNOTSUPP;
+
+	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
+			      GFP_KERNEL);
+	if (!wqs_handles)
+		return -ENOMEM;
+
+	err = ib_copy_from_udata(wqs_handles, ucore,
+				 num_wq_handles * sizeof(__u32));
+	if (err)
+		goto err_free;
+
+	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
+	if (!wqs) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
+	     num_read_wqs++) {
+		wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext);
+		if (!wq) {
+			err = -EINVAL;
+			goto put_wqs;
+		}
+
+		wqs[num_read_wqs] = wq;
+	}
+
+	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+	if (!uobj) {
+		err = -ENOMEM;
+		goto put_wqs;
+	}
+
+	init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class);
+	down_write(&uobj->mutex);
+	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
+	init_attr.ind_tbl = wqs;
+	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
+
+	if (IS_ERR(rwq_ind_tbl)) {
+		err = PTR_ERR(rwq_ind_tbl);
+		goto err_uobj;
+	}
+
+	rwq_ind_tbl->ind_tbl = wqs;
+	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
+	rwq_ind_tbl->uobject = uobj;
+	uobj->object = rwq_ind_tbl;
+	rwq_ind_tbl->device = ib_dev;
+	atomic_set(&rwq_ind_tbl->usecnt, 0);
+
+	for (i = 0; i < num_wq_handles; i++)
+		atomic_inc(&wqs[i]->usecnt);
+
+	err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+	if (err)
+		goto destroy_ind_tbl;
+
+	resp.ind_tbl_handle = uobj->id;
+	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
+	resp.response_length = required_resp_len;
+
+	err = ib_copy_to_udata(ucore,
+			       &resp, resp.response_length);
+	if (err)
+		goto err_copy;
+
+	kfree(wqs_handles);
+
+	for (j = 0; j < num_read_wqs; j++)
+		put_wq_read(wqs[j]);
+
+	mutex_lock(&file->mutex);
+	list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list);
+	mutex_unlock(&file->mutex);
+
+	uobj->live = 1;
+
+	up_write(&uobj->mutex);
+	return 0;
+
+err_copy:
+	idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+destroy_ind_tbl:
+	ib_destroy_rwq_ind_table(rwq_ind_tbl);
+err_uobj:
+	put_uobj_write(uobj);
+put_wqs:
+	for (j = 0; j < num_read_wqs; j++)
+		put_wq_read(wqs[j]);
+err_free:
+	kfree(wqs_handles);
+	kfree(wqs);
+	return err;
+}
+
+int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
+				       struct ib_device *ib_dev,
+				       struct ib_udata *ucore,
+				       struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
+	struct ib_rwq_ind_table *rwq_ind_tbl;
+	struct ib_uobject *uobj;
+	int ret;
+	struct ib_wq **ind_tbl;
+	size_t required_cmd_sz;
+
+	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
+
+	if (ucore->inlen < required_cmd_sz)
+		return -EINVAL;
+
+	if (ucore->inlen > sizeof(cmd) &&
+	    !ib_is_udata_cleared(ucore, sizeof(cmd),
+				 ucore->inlen - sizeof(cmd)))
+		return -EOPNOTSUPP;
+
+	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+	if (ret)
+		return ret;
+
+	if (cmd.comp_mask)
+		return -EOPNOTSUPP;
+
+	uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle,
+			      file->ucontext);
+	if (!uobj)
+		return -EINVAL;
+	rwq_ind_tbl = uobj->object;
+	ind_tbl = rwq_ind_tbl->ind_tbl;
+
+	ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
+	if (!ret)
+		uobj->live = 0;
+
+	put_uobj_write(uobj);
+
+	if (ret)
+		return ret;
+
+	idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+
+	mutex_lock(&file->mutex);
+	list_del(&uobj->list);
+	mutex_unlock(&file->mutex);
+
+	put_uobj(uobj);
+	kfree(ind_tbl);
+	return ret;
+}
+
 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 			     struct ib_device *ib_dev,
 			     struct ib_udata *ucore,
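
For reference, the input length the create handler demands can be reproduced with a small helper that mirrors the expected_in_size computation above. This is purely an illustrative sketch; the real command layout lives in the ib_user_verbs.h uapi header, which is not part of this diff:

    #include <stddef.h>
    #include <stdint.h>

    /* Mirrors the check in ib_uverbs_ex_create_rwq_ind_table(): after the
     * fixed header (everything up to and including log_ind_tbl_size), the
     * caller passes one __u32 WQ handle per table entry, padded so the
     * handle area stays u64 aligned. Only the single-handle case needs the
     * padding, since every other count (1 << log size) is already even.
     */
    static size_t rwq_ind_tbl_handles_size(uint32_t log_ind_tbl_size)
    {
            uint32_t num_wq_handles = 1U << log_ind_tbl_size;
            size_t size = num_wq_handles * sizeof(uint32_t);

            if (num_wq_handles == 1)
                    size += sizeof(uint32_t);  /* keep the array u64 aligned */

            return size;
    }

For example, log_ind_tbl_size = 0 yields 8 bytes of handle data (one handle plus padding), while log_ind_tbl_size = 3 yields 8 handles * 4 bytes = 32 bytes with no padding.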