@@ -411,7 +411,8 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 
 	req->ctx = ctx;
 	req->flags = 0;
-	refcount_set(&req->refs, 0);
+	/* one is dropped after submission, the other at completion */
+	refcount_set(&req->refs, 2);
 	return req;
 out:
 	io_ring_drop_ctx_refs(ctx, 1);
@@ -429,10 +430,14 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 
 static void io_free_req(struct io_kiocb *req)
 {
-	if (!refcount_read(&req->refs) || refcount_dec_and_test(&req->refs)) {
-		io_ring_drop_ctx_refs(req->ctx, 1);
-		kmem_cache_free(req_cachep, req);
-	}
+	io_ring_drop_ctx_refs(req->ctx, 1);
+	kmem_cache_free(req_cachep, req);
+}
+
+static void io_put_req(struct io_kiocb *req)
+{
+	if (refcount_dec_and_test(&req->refs))
+		io_free_req(req);
 }
 
 /*
@@ -453,7 +458,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 
 		io_cqring_fill_event(ctx, req->user_data, req->error, 0);
 
-		reqs[to_free++] = req;
+		if (refcount_dec_and_test(&req->refs))
+			reqs[to_free++] = req;
 		(*nr_events)++;
 
 		/*
@@ -616,7 +622,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 
 	io_fput(req);
 	io_cqring_add_event(req->ctx, req->user_data, res, 0);
-	io_free_req(req);
+	io_put_req(req);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -1083,7 +1089,7 @@ static int io_nop(struct io_kiocb *req, u64 user_data)
 		io_fput(req);
 	}
 	io_cqring_add_event(ctx, user_data, err, 0);
-	io_free_req(req);
+	io_put_req(req);
 	return 0;
 }
 
@@ -1146,7 +1152,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	io_fput(req);
 	io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
-	io_free_req(req);
+	io_put_req(req);
 	return 0;
 }
 
@@ -1204,15 +1210,15 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	spin_unlock_irq(&ctx->completion_lock);
 
 	io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
-	io_free_req(req);
+	io_put_req(req);
 	return 0;
 }
 
 static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
 {
 	io_cqring_add_event(req->ctx, req->user_data, mangle_poll(mask), 0);
 	io_fput(req);
-	io_free_req(req);
+	io_put_req(req);
 }
 
 static void io_poll_complete_work(struct work_struct *work)
@@ -1346,9 +1352,6 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	INIT_LIST_HEAD(&poll->wait.entry);
 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 
-	/* one for removal from waitqueue, one for this function */
-	refcount_set(&req->refs, 2);
-
 	mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
 	if (unlikely(!poll->head)) {
 		/* we did not manage to set up a waitqueue, done */
@@ -1380,13 +1383,12 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		 * Drop one of our refs to this req, __io_submit_sqe() will
 		 * drop the other one since we're returning an error.
 		 */
-		io_free_req(req);
+		io_put_req(req);
 		return ipt.error;
 	}
 
 	if (mask)
 		io_poll_complete(req, mask);
-	io_free_req(req);
 	return 0;
 }
 
@@ -1524,10 +1526,13 @@ static void io_sq_wq_submit_work(struct work_struct *work)
 				break;
 			cond_resched();
 		} while (1);
+
+		/* drop submission reference */
+		io_put_req(req);
 	}
 	if (ret) {
 		io_cqring_add_event(ctx, sqe->user_data, ret, 0);
-		io_free_req(req);
+		io_put_req(req);
 	}
 
 	/* async context always use a copy of the sqe */
@@ -1649,11 +1654,22 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 				INIT_WORK(&req->work, io_sq_wq_submit_work);
 				queue_work(ctx->sqo_wq, &req->work);
 			}
-			ret = 0;
+
+			/*
+			 * Queued up for async execution, worker will release
+			 * submit reference when the iocb is actually
+			 * submitted.
+			 */
+			return 0;
 		}
 	}
+
+	/* drop submission reference */
+	io_put_req(req);
+
+	/* and drop final reference, if we failed */
 	if (ret)
-		io_free_req(req);
+		io_put_req(req);
 
 	return ret;
 }
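
Taken together, the hunks above replace the old "refs == 0 means not reference counted" convention with a plain two-reference model: io_get_req() now hands every request out with refs set to 2, the submission path drops one reference through the new io_put_req() once the sqe has been submitted (or handed to the async worker), and the completion path drops the other; io_free_req() becomes the unconditional destructor that only runs when the last reference is gone. The standalone userspace sketch below illustrates that lifecycle; it is only an illustration, using C11 atomics in place of the kernel's refcount_t, malloc/free in place of kmem_cache, and invented mock_* names that are not part of io_uring.

/*
 * Minimal userspace sketch of the two-reference model the patch adopts.
 * Assumptions: C11 atomics stand in for refcount_t, malloc/free stand in
 * for kmem_cache, and all mock_* identifiers are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_req {
	atomic_int refs;
	long result;
};

static struct mock_req *mock_get_req(void)
{
	struct mock_req *req = malloc(sizeof(*req));

	if (!req)
		return NULL;
	/* one is dropped after submission, the other at completion */
	atomic_init(&req->refs, 2);
	req->result = 0;
	return req;
}

/* unconditional destructor, analogous to the new io_free_req() */
static void mock_free_req(struct mock_req *req)
{
	printf("freeing request %p\n", (void *)req);
	free(req);
}

/* drop one reference; free when the last one goes away, like io_put_req() */
static void mock_put_req(struct mock_req *req)
{
	if (atomic_fetch_sub(&req->refs, 1) == 1)
		mock_free_req(req);
}

/* completion side: post the result, then drop the completion reference */
static void mock_complete(struct mock_req *req, long res)
{
	req->result = res;
	printf("completed with result %ld\n", res);
	mock_put_req(req);
}

int main(void)
{
	struct mock_req *req = mock_get_req();

	if (!req)
		return 1;

	/* "submission": completion may happen before or after this point */
	mock_complete(req, 42);

	/* drop the submission reference; whichever put is last frees */
	mock_put_req(req);
	return 0;
}

Because either side may finish last and whichever put drops the count to zero does the free, per-opcode workarounds such as the poll path's own refcount_set(&req->refs, 2) are no longer needed, which is why that hunk simply deletes it.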