@@ -328,11 +328,8 @@ static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *
 static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
                                                  struct ib_uverbs_file *ufile)
 {
-        const struct uverbs_obj_fd_type *fd_type =
-                container_of(type, struct uverbs_obj_fd_type, type);
         int new_fd;
         struct ib_uobject *uobj;
-        struct file *filp;
 
         new_fd = get_unused_fd_flags(O_CLOEXEC);
         if (new_fd < 0)
@@ -344,28 +341,8 @@ static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *t
                 return uobj;
         }
 
-        /*
-         * The kref for uobj is moved into filp->private data and put in
-         * uverbs_close_fd(). Once anon_inode_getfile() succeeds
-         * uverbs_close_fd() must be guaranteed to be called from the provided
-         * fops release callback. We piggyback our kref of uobj on the stack
-         * with the lifetime of the struct file.
-         */
-        filp = anon_inode_getfile(fd_type->name,
-                                  fd_type->fops,
-                                  uobj,
-                                  fd_type->flags);
-        if (IS_ERR(filp)) {
-                put_unused_fd(new_fd);
-                uverbs_uobject_put(uobj);
-                return (void *)filp;
-        }
-
         uobj->id = new_fd;
-        uobj->object = filp;
         uobj->ufile = ufile;
-        /* Matching put will be done in uverbs_close_fd() */
-        kref_get(&ufile->ref);
 
         return uobj;
 }
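
Editor's note: for orientation, this is roughly how alloc_begin_fd_uobject() reads once the two hunks above are applied; the begin step now only reserves an fd number and a uobject, and no struct file exists yet. It is assembled from the context lines in this diff, and the middle of the function (the ERR_PTR() return and the alloc_uobj() call) falls between the hunks, so that part is an assumption of this sketch, not shown in the patch.

static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
                                                 struct ib_uverbs_file *ufile)
{
        int new_fd;
        struct ib_uobject *uobj;

        new_fd = get_unused_fd_flags(O_CLOEXEC);
        if (new_fd < 0)
                return ERR_PTR(new_fd);

        /* Assumed middle section (between the hunks): allocate the uobject,
         * giving the fd number back if that fails.
         */
        uobj = alloc_uobj(ufile, type);
        if (IS_ERR(uobj)) {
                put_unused_fd(new_fd);
                return uobj;
        }

        uobj->id = new_fd;
        uobj->ufile = ufile;

        return uobj;
}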
@@ -407,12 +384,10 @@ static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
 
 static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
 {
-        struct file *filp = uobj->object;
-        int id = uobj->id;
+        put_unused_fd(uobj->id);
 
-        /* Unsuccessful NEW */
-        fput(filp);
-        put_unused_fd(id);
+        /* Pairs with the kref from alloc_begin_idr_uobject */
+        uverbs_uobject_put(uobj);
 }
 
 static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
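
Editor's note: assembled from the hunk above, the new fd abort path is just two undo steps, matching a begin step that no longer creates a struct file:

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
        /* Give back the reserved fd number; no struct file exists yet */
        put_unused_fd(uobj->id);

        /* Pairs with the kref from alloc_begin_idr_uobject */
        uverbs_uobject_put(uobj);
}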
@@ -500,7 +475,7 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
         return ret;
 }
 
-static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
+static int alloc_commit_idr_uobject(struct ib_uobject *uobj)
 {
         struct ib_uverbs_file *ufile = uobj->ufile;
 
@@ -514,11 +489,34 @@ static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
          */
         WARN_ON(idr_replace(&ufile->idr, uobj, uobj->id));
         spin_unlock(&ufile->idr_lock);
+
+        return 0;
 }
 
-static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
+static int alloc_commit_fd_uobject(struct ib_uobject *uobj)
 {
+        const struct uverbs_obj_fd_type *fd_type =
+                container_of(uobj->type, struct uverbs_obj_fd_type, type);
         int fd = uobj->id;
+        struct file *filp;
+
+        /*
+         * The kref for uobj is moved into filp->private data and put in
+         * uverbs_close_fd(). Once alloc_commit() succeeds uverbs_close_fd()
+         * must be guaranteed to be called from the provided fops release
+         * callback.
+         */
+        filp = anon_inode_getfile(fd_type->name,
+                                  fd_type->fops,
+                                  uobj,
+                                  fd_type->flags);
+        if (IS_ERR(filp))
+                return PTR_ERR(filp);
+
+        uobj->object = filp;
+
+        /* Matching put will be done in uverbs_close_fd() */
+        kref_get(&uobj->ufile->ref);
 
         /* This shouldn't be used anymore. Use the file object instead */
         uobj->id = 0;
@@ -527,7 +525,9 @@ static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
          * NOTE: Once we install the file we loose ownership of our kref on
          * uobj. It will be put by uverbs_close_fd()
          */
-        fd_install(fd, uobj->object);
+        fd_install(fd, filp);
+
+        return 0;
 }
 
 /*
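
Editor's note: taken together, the two hunks above move anon_inode_getfile() into the commit step, so a failure to create the struct file can now be reported to the caller before fd_install() publishes the fd. A condensed sketch of the resulting commit step follows; it is not a verbatim copy of the function, and the _sketch name is the editor's:

static int alloc_commit_fd_uobject_sketch(struct ib_uobject *uobj)
{
        const struct uverbs_obj_fd_type *fd_type =
                container_of(uobj->type, struct uverbs_obj_fd_type, type);
        int fd = uobj->id;
        struct file *filp;

        /* Creating the file can fail, and the caller now sees that error */
        filp = anon_inode_getfile(fd_type->name, fd_type->fops, uobj,
                                  fd_type->flags);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        uobj->object = filp;
        /* Matching put will be done in uverbs_close_fd() */
        kref_get(&uobj->ufile->ref);
        uobj->id = 0;

        /* fd_install() cannot fail, so nothing after it needs unwinding */
        fd_install(fd, filp);
        return 0;
}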
@@ -538,11 +538,10 @@ static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
 int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
 {
         struct ib_uverbs_file *ufile = uobj->ufile;
+        int ret;
 
         /* Cleanup is running. Calling this should have been impossible */
         if (!down_read_trylock(&ufile->hw_destroy_rwsem)) {
-                int ret;
-
                 WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
                 ret = uobj->type->type_class->remove_commit(uobj,
                                                             RDMA_REMOVE_DURING_CLEANUP);
@@ -552,18 +551,28 @@ int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
                 return ret;
         }
 
-        /* matches atomic_set(-1) in alloc_uobj */
         assert_uverbs_usecnt(uobj, true);
-        atomic_set(&uobj->usecnt, 0);
+
+        /* alloc_commit consumes the uobj kref */
+        ret = uobj->type->type_class->alloc_commit(uobj);
+        if (ret) {
+                if (uobj->type->type_class->remove_commit(
+                            uobj, RDMA_REMOVE_DURING_CLEANUP))
+                        pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
+                                uobj->id);
+                up_read(&ufile->hw_destroy_rwsem);
+                return ret;
+        }
 
         /* kref is held so long as the uobj is on the uobj list. */
         uverbs_uobject_get(uobj);
         spin_lock_irq(&ufile->uobjects_lock);
         list_add(&uobj->list, &ufile->uobjects);
         spin_unlock_irq(&ufile->uobjects_lock);
 
-        /* alloc_commit consumes the uobj kref */
-        uobj->type->type_class->alloc_commit(uobj);
+        /* matches atomic_set(-1) in alloc_uobj */
+        atomic_set(&uobj->usecnt, 0);
+
 
         up_read(&ufile->hw_destroy_rwsem);
         return 0;
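
Editor's note: the last two hunks make rdma_alloc_commit_uobject() itself able to fail, and they reorder the commit so alloc_commit() runs before the uobject is published on the ufile list or its usecnt is reset, presumably so a failed commit never leaves a half-visible uobject behind. A condensed sketch of the new flow, reconstructed from the added lines above (the cleanup-in-progress branch is elided):

int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
        struct ib_uverbs_file *ufile = uobj->ufile;
        int ret;

        /* ... cleanup-in-progress check, unchanged from the hunk above, elided ... */

        assert_uverbs_usecnt(uobj, true);

        /* alloc_commit consumes the uobj kref; tear the object down on failure */
        ret = uobj->type->type_class->alloc_commit(uobj);
        if (ret) {
                if (uobj->type->type_class->remove_commit(
                            uobj, RDMA_REMOVE_DURING_CLEANUP))
                        pr_warn("ib_uverbs: cleanup of idr object %d failed\n",
                                uobj->id);
                up_read(&ufile->hw_destroy_rwsem);
                return ret;
        }

        /* Only a successfully committed uobject is published on the list */
        uverbs_uobject_get(uobj);
        spin_lock_irq(&ufile->uobjects_lock);
        list_add(&uobj->list, &ufile->uobjects);
        spin_unlock_irq(&ufile->uobjects_lock);

        /* matches atomic_set(-1) in alloc_uobj */
        atomic_set(&uobj->usecnt, 0);

        up_read(&ufile->hw_destroy_rwsem);
        return 0;
}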