@@ -301,6 +301,12 @@ struct io_poll_iocb {
 	struct wait_queue_entry		wait;
 };
 
+struct io_close {
+	struct file			*file;
+	struct file			*put_file;
+	int				fd;
+};
+
 struct io_timeout_data {
 	struct io_kiocb			*req;
 	struct hrtimer			timer;
@@ -414,6 +420,7 @@ struct io_kiocb {
 		struct io_connect	connect;
 		struct io_sr_msg	sr_msg;
 		struct io_open		open;
+		struct io_close		close;
 	};
 
 	struct io_async_ctx		*io;
@@ -2228,6 +2235,94 @@ static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
 	return 0;
 }
 
+static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	/*
+	 * If we queue this for async, it must not be cancellable. That would
+	 * leave the 'file' in an undeterminate state.
+	 */
+	req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+
+	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
+	    sqe->rw_flags || sqe->buf_index)
+		return -EINVAL;
+	if (sqe->flags & IOSQE_FIXED_FILE)
+		return -EINVAL;
+
+	req->close.fd = READ_ONCE(sqe->fd);
+	if (req->file->f_op == &io_uring_fops ||
+	    req->close.fd == req->ring_fd)
+		return -EBADF;
+
+	return 0;
+}
+
+static void io_close_finish(struct io_wq_work **workptr)
+{
+	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+	struct io_kiocb *nxt = NULL;
+
+	/* Invoked with files, we need to do the close */
+	if (req->work.files) {
+		int ret;
+
+		ret = filp_close(req->close.put_file, req->work.files);
+		if (ret < 0) {
+			req_set_fail_links(req);
+		}
+		io_cqring_add_event(req, ret);
+	}
+
+	fput(req->close.put_file);
+
+	/* we bypassed the re-issue, drop the submission reference */
+	io_put_req(req);
+	io_put_req_find_next(req, &nxt);
+	if (nxt)
+		io_wq_assign_next(workptr, nxt);
+}
+
+static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
+		    bool force_nonblock)
+{
+	int ret;
+
+	req->close.put_file = NULL;
+	ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
+	if (ret < 0)
+		return ret;
+
+	/* if the file has a flush method, be safe and punt to async */
+	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker()) {
+		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
+		goto eagain;
+	}
+
+	/*
+	 * No ->flush(), safely close from here and just punt the
+	 * fput() to async context.
+	 */
+	ret = filp_close(req->close.put_file, current->files);
+
+	if (ret < 0)
+		req_set_fail_links(req);
+	io_cqring_add_event(req, ret);
+
+	if (io_wq_current_is_worker()) {
+		struct io_wq_work *old_work, *work;
+
+		old_work = work = &req->work;
+		io_close_finish(&work);
+		if (work && work != old_work)
+			*nxt = container_of(work, struct io_kiocb, work);
+		return 0;
+	}
+
+eagain:
+	req->work.func = io_close_finish;
+	return -EAGAIN;
+}
+
 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -3256,6 +3351,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
 	case IORING_OP_OPENAT:
 		ret = io_openat_prep(req, sqe);
 		break;
+	case IORING_OP_CLOSE:
+		ret = io_close_prep(req, sqe);
+		break;
 	default:
 		printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
 			    req->opcode);
@@ -3426,6 +3524,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		}
 		ret = io_openat(req, nxt, force_nonblock);
 		break;
+	case IORING_OP_CLOSE:
+		if (sqe) {
+			ret = io_close_prep(req, sqe);
+			if (ret)
+				break;
+		}
+		ret = io_close(req, nxt, force_nonblock);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -3572,6 +3678,9 @@ static int io_grab_files(struct io_kiocb *req)
 	int ret = -EBADF;
 	struct io_ring_ctx *ctx = req->ctx;
 
+	if (!req->ring_file)
+		return -EBADF;
+
 	rcu_read_lock();
 	spin_lock_irq(&ctx->inflight_lock);
 	/*
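
For context, a minimal userspace sketch of how the new IORING_OP_CLOSE opcode could be exercised. This is not part of the patch: it assumes a liburing version that provides io_uring_prep_close(), and the file path and queue depth are arbitrary placeholders. The completion's res field carries the close result (0 or -errno), matching the io_cqring_add_event() calls above.

/* Hypothetical sketch: close a file descriptor through io_uring via liburing. */
#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	fd = open("/tmp/testfile", O_RDONLY);	/* placeholder path */
	if (fd < 0)
		return 1;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Queue an IORING_OP_CLOSE for the descriptor we just opened. */
	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	io_uring_prep_close(sqe, fd);
	io_uring_submit(&ring);

	/* cqe->res is filp_close()'s return value: 0 on success, -errno on failure. */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret == 0) {
		printf("close returned %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}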