@@ -153,6 +153,37 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
 	return uobj;
 }
 
+static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
+						struct ib_ucontext *ucontext,
+						int id, bool write)
+{
+	struct file *f;
+	struct ib_uobject *uobject;
+	const struct uverbs_obj_fd_type *fd_type =
+		container_of(type, struct uverbs_obj_fd_type, type);
+
+	if (write)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	f = fget(id);
+	if (!f)
+		return ERR_PTR(-EBADF);
+
+	uobject = f->private_data;
+	/*
+	 * fget(id) ensures we are not currently running uverbs_close_fd,
+	 * and the caller is expected to ensure that uverbs_close_fd is never
+	 * done while a call to lookup is possible.
+	 */
+	if (f->f_op != fd_type->fops) {
+		fput(f);
+		return ERR_PTR(-EBADF);
+	}
+
+	uverbs_uobject_get(uobject);
+	return uobject;
+}
+
 struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
 					   struct ib_ucontext *ucontext,
 					   int id, bool write)
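For fd-based types, the id passed to lookup is a file descriptor: the lookup goes through fget() rather than an idr, and exclusive (write) lookups are rejected with -EOPNOTSUPP. A minimal handler-side sketch, assuming a consumer-defined type like the example_fd_type sketched at the end of this section (all example_* names are illustrative, not part of this patch):

	struct ib_uobject *uobj;

	/* "fd" comes from userspace; write lookups are unsupported here */
	uobj = rdma_lookup_get_uobject(&example_fd_type.type, ucontext, fd, false);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* ... uobj->object is the backing struct file ... */

	rdma_lookup_put_uobject(uobj, false);	/* drops the fget() reference */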
@@ -211,6 +242,46 @@ static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *
 	return ERR_PTR(ret);
 }
 
+static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
+						 struct ib_ucontext *ucontext)
+{
+	const struct uverbs_obj_fd_type *fd_type =
+		container_of(type, struct uverbs_obj_fd_type, type);
+	int new_fd;
+	struct ib_uobject *uobj;
+	struct ib_uobject_file *uobj_file;
+	struct file *filp;
+
+	new_fd = get_unused_fd_flags(O_CLOEXEC);
+	if (new_fd < 0)
+		return ERR_PTR(new_fd);
+
+	uobj = alloc_uobj(ucontext, type);
+	if (IS_ERR(uobj)) {
+		put_unused_fd(new_fd);
+		return uobj;
+	}
+
+	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
+	filp = anon_inode_getfile(fd_type->name,
+				  fd_type->fops,
+				  uobj_file,
+				  fd_type->flags);
+	if (IS_ERR(filp)) {
+		put_unused_fd(new_fd);
+		uverbs_uobject_put(uobj);
+		return (void *)filp;
+	}
+
+	uobj_file->uobj.id = new_fd;
+	uobj_file->uobj.object = filp;
+	uobj_file->ufile = ucontext->ufile;
+	INIT_LIST_HEAD(&uobj->list);
+	kref_get(&uobj_file->ufile->ref);
+
+	return uobj;
+}
+
 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
 					    struct ib_ucontext *ucontext)
 {
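alloc_begin_fd_uobject() follows the kernel's usual reserve-then-install FD idiom, but deliberately stops short of fd_install(): the descriptor only becomes visible to userspace at commit time (see alloc_commit_fd_uobject() below). A condensed, self-contained sketch of the underlying idiom, with hypothetical example_create_fd and example_fd_fops names:

	#include <linux/anon_inodes.h>
	#include <linux/err.h>
	#include <linux/file.h>

	static int example_create_fd(void *priv)
	{
		struct file *filp;
		int fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve a descriptor */

		if (fd < 0)
			return fd;

		filp = anon_inode_getfile("[example]", &example_fd_fops, priv, O_RDWR);
		if (IS_ERR(filp)) {
			put_unused_fd(fd);			/* undo the reservation */
			return PTR_ERR(filp);
		}

		fd_install(fd, filp);	/* the patch defers this step to alloc_commit */
		return fd;
	}

Splitting the reservation from the installation means a failing handler can still abort with put_unused_fd() and fput() without a half-initialized object ever being reachable through the file table.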
@@ -246,6 +317,39 @@ static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
 	return ret;
 }
 
+static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
+{
+	struct ib_uobject_file *uobj_file =
+		container_of(uobj, struct ib_uobject_file, uobj);
+	struct file *filp = uobj->object;
+	int id = uobj_file->uobj.id;
+
+	/* Unsuccessful NEW */
+	fput(filp);
+	put_unused_fd(id);
+}
+
+static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
+						 enum rdma_remove_reason why)
+{
+	const struct uverbs_obj_fd_type *fd_type =
+		container_of(uobj->type, struct uverbs_obj_fd_type, type);
+	struct ib_uobject_file *uobj_file =
+		container_of(uobj, struct ib_uobject_file, uobj);
+	int ret = fd_type->context_closed(uobj_file, why);
+
+	if (why == RDMA_REMOVE_DESTROY && ret)
+		return ret;
+
+	if (why == RDMA_REMOVE_DURING_CLEANUP) {
+		alloc_abort_fd_uobject(uobj);
+		return ret;
+	}
+
+	uobj_file->uobj.context = NULL;
+	return ret;
+}
+
 static void lockdep_check(struct ib_uobject *uobj, bool write)
 {
 #ifdef CONFIG_LOCKDEP
@@ -314,6 +418,19 @@ static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
 	spin_unlock(&uobj->context->ufile->idr_lock);
 }
 
+static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
+{
+	struct ib_uobject_file *uobj_file =
+		container_of(uobj, struct ib_uobject_file, uobj);
+
+	uverbs_uobject_add(&uobj_file->uobj);
+	fd_install(uobj_file->uobj.id, uobj->object);
+	/* This shouldn't be used anymore. Use the file object instead */
+	uobj_file->uobj.id = 0;
+	/* Get another reference as we export this to the fops */
+	uverbs_uobject_get(&uobj_file->uobj);
+}
+
 int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
 {
 	/* Cleanup is running. Calling this should have been impossible */
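Taken together, the begin/abort/commit callbacks are driven through the generic rdma_alloc_*_uobject() entry points. A hedged sketch of a handler creating an fd-based object (example_driver_init and example_fd_type are hypothetical):

	struct ib_uobject *uobj;
	int ret;

	uobj = rdma_alloc_begin_uobject(&example_fd_type.type, ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = example_driver_init(uobj);
	if (ret) {
		rdma_alloc_abort_uobject(uobj);	/* fput()s the file, frees the fd */
		return ret;
	}

	return rdma_alloc_commit_uobject(uobj);	/* fd_install() happens in here */

Note that alloc_commit_fd_uobject() zeroes uobj.id once the file is installed, so a handler must copy the descriptor number into its response before committing.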
@@ -352,6 +469,15 @@ static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool write)
 {
 }
 
+static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool write)
+{
+	struct file *filp = uobj->object;
+
+	WARN_ON(write);
+	/* This indirectly calls uverbs_close_fd and frees the object */
+	fput(filp);
+}
+
 void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool write)
 {
 	lockdep_check(uobj, write);
@@ -392,6 +518,39 @@ const struct uverbs_obj_type_class uverbs_idr_class = {
 	.needs_kfree_rcu = true,
 };
 
+static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
+{
+	struct ib_ucontext *ucontext;
+	struct ib_uverbs_file *ufile = uobj_file->ufile;
+	int ret;
+
+	mutex_lock(&uobj_file->ufile->cleanup_mutex);
+
+	/* uobject was either already cleaned up or is cleaned up right now anyway */
+	if (!uobj_file->uobj.context ||
+	    !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
+		goto unlock;
+
+	ucontext = uobj_file->uobj.context;
+	ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE,
+					  true);
+	up_read(&ucontext->cleanup_rwsem);
+	if (ret)
+		pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");
+unlock:
+	mutex_unlock(&ufile->cleanup_mutex);
+}
+
+void uverbs_close_fd(struct file *f)
+{
+	struct ib_uobject_file *uobj_file = f->private_data;
+	struct kref *uverbs_file_ref = &uobj_file->ufile->ref;
+
+	_uverbs_close_fd(uobj_file);
+	uverbs_uobject_put(&uobj_file->uobj);
+	kref_put(uverbs_file_ref, ib_uverbs_release_file);
+}
+
 void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
 {
 	enum rdma_remove_reason reason = device_removed ?
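uverbs_close_fd() is meant to be called from the ->release handler of the consumer's file_operations; that call pairs with the extra reference taken in alloc_commit_fd_uobject(). A minimal sketch of such a handler (the name is illustrative):

	static int example_uobj_release(struct inode *inode, struct file *filp)
	{
		/* Removes the uobject (if still live) and drops the fops reference */
		uverbs_close_fd(filp);
		return 0;
	}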
@@ -412,7 +571,13 @@ void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
 
 	/*
 	 * This shouldn't run while executing other commands on this
-	 * context.
+	 * context. Thus, the only thing we should take care of is
+	 * releasing an FD while traversing this list. The FD could be
+	 * closed and released by the _release fop of this FD.
+	 * In order to mitigate this, we add a lock.
+	 * We take and release the lock once per destruction-order
+	 * traversal, to give other threads (which might still use the
+	 * FDs) a chance to run.
 	 */
 	mutex_lock(&ucontext->uobjects_lock);
 	list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
@@ -448,3 +613,13 @@ void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
 	init_rwsem(&ucontext->cleanup_rwsem);
 }
 
+const struct uverbs_obj_type_class uverbs_fd_class = {
+	.alloc_begin = alloc_begin_fd_uobject,
+	.lookup_get = lookup_get_fd_uobject,
+	.alloc_commit = alloc_commit_fd_uobject,
+	.alloc_abort = alloc_abort_fd_uobject,
+	.lookup_put = lookup_put_fd_uobject,
+	.remove_commit = remove_commit_fd_uobject,
+	.needs_kfree_rcu = false,
+};
+
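Finally, a consumer wires uverbs_fd_class up through a struct uverbs_obj_fd_type, supplying the fops, anon-inode name, open flags, and context_closed callback that the new code dereferences above. A sketch of such a declaration; every example_* name is illustrative and the exact uverbs_obj_type field layout is assumed from the container_of() usage in this patch:

	static const struct file_operations example_fd_fops = {
		.owner	 = THIS_MODULE,
		.release = example_uobj_release,	/* calls uverbs_close_fd() */
	};

	static int example_context_closed(struct ib_uobject_file *uobj_file,
					  enum rdma_remove_reason why)
	{
		/* Tear down consumer state; a non-zero return can veto DESTROY */
		return 0;
	}

	static const struct uverbs_obj_fd_type example_fd_type = {
		.type		 = { .type_class = &uverbs_fd_class },
		.context_closed	 = example_context_closed,
		.fops		 = &example_fd_fops,
		.name		 = "[example-fd]",	/* anon inode name */
		.flags		 = O_RDONLY,
	};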