Commit e65ef56

io_uring: use regular request ref counts
Get rid of the special casing of "normal" requests not having any references to the io_kiocb. We initialize the ref count to 2, one for the submission side, and one for the completion side.

Signed-off-by: Jens Axboe <[email protected]>
1 parent 12ad143 commit e65ef56
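
As an illustration of the scheme the commit message describes, here is a minimal userspace C sketch of the same two-reference lifecycle: a request starts with a reference count of 2, the submission path drops one reference, the completion path drops the other, and whichever drop reaches zero frees the request. The struct request, get_req(), put_req() and free_req() names are illustrative stand-ins, not the kernel's io_kiocb API, and plain C11 atomics stand in for the kernel's refcount_t.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for struct io_kiocb; 'refs' models refcount_t req->refs */
struct request {
	atomic_int refs;
	int user_data;
};

static struct request *get_req(int user_data)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return NULL;
	/* one ref is dropped after submission, the other at completion */
	atomic_init(&req->refs, 2);
	req->user_data = user_data;
	return req;
}

/* models io_free_req(): unconditional teardown of the request */
static void free_req(struct request *req)
{
	printf("request %d freed\n", req->user_data);
	free(req);
}

/* models io_put_req(): drop one reference, free on the last one */
static void put_req(struct request *req)
{
	if (atomic_fetch_sub(&req->refs, 1) == 1)
		free_req(req);
}

int main(void)
{
	struct request *req = get_req(42);

	if (!req)
		return 1;
	put_req(req);	/* submission side drops its reference */
	put_req(req);	/* completion side drops the final reference */
	return 0;
}

With both sides simply dropping a reference, neither path needs to special-case requests that were never "really" referenced, which is the point of the change.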

File tree: 1 file changed

fs/io_uring.c: 35 additions & 19 deletions
@@ -411,7 +411,8 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 
 	req->ctx = ctx;
 	req->flags = 0;
-	refcount_set(&req->refs, 0);
+	/* one is dropped after submission, the other at completion */
+	refcount_set(&req->refs, 2);
 	return req;
 out:
 	io_ring_drop_ctx_refs(ctx, 1);
@@ -429,10 +430,14 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 
 static void io_free_req(struct io_kiocb *req)
 {
-	if (!refcount_read(&req->refs) || refcount_dec_and_test(&req->refs)) {
-		io_ring_drop_ctx_refs(req->ctx, 1);
-		kmem_cache_free(req_cachep, req);
-	}
+	io_ring_drop_ctx_refs(req->ctx, 1);
+	kmem_cache_free(req_cachep, req);
+}
+
+static void io_put_req(struct io_kiocb *req)
+{
+	if (refcount_dec_and_test(&req->refs))
+		io_free_req(req);
 }
 
 /*
@@ -453,7 +458,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 
 		io_cqring_fill_event(ctx, req->user_data, req->error, 0);
 
-		reqs[to_free++] = req;
+		if (refcount_dec_and_test(&req->refs))
+			reqs[to_free++] = req;
 		(*nr_events)++;
 
 		/*
@@ -616,7 +622,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 
 	io_fput(req);
 	io_cqring_add_event(req->ctx, req->user_data, res, 0);
-	io_free_req(req);
+	io_put_req(req);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -1083,7 +1089,7 @@ static int io_nop(struct io_kiocb *req, u64 user_data)
 		io_fput(req);
 	}
 	io_cqring_add_event(ctx, user_data, err, 0);
-	io_free_req(req);
+	io_put_req(req);
 	return 0;
 }
 
@@ -1146,7 +1152,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	io_fput(req);
 	io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
-	io_free_req(req);
+	io_put_req(req);
 	return 0;
 }
 
@@ -1204,15 +1210,15 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	spin_unlock_irq(&ctx->completion_lock);
 
 	io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
-	io_free_req(req);
+	io_put_req(req);
 	return 0;
 }
 
 static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
 {
 	io_cqring_add_event(req->ctx, req->user_data, mangle_poll(mask), 0);
 	io_fput(req);
-	io_free_req(req);
+	io_put_req(req);
 }
 
 static void io_poll_complete_work(struct work_struct *work)
@@ -1346,9 +1352,6 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	INIT_LIST_HEAD(&poll->wait.entry);
 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 
-	/* one for removal from waitqueue, one for this function */
-	refcount_set(&req->refs, 2);
-
 	mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
 	if (unlikely(!poll->head)) {
 		/* we did not manage to set up a waitqueue, done */
@@ -1380,13 +1383,12 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		 * Drop one of our refs to this req, __io_submit_sqe() will
 		 * drop the other one since we're returning an error.
 		 */
-		io_free_req(req);
+		io_put_req(req);
 		return ipt.error;
 	}
 
 	if (mask)
 		io_poll_complete(req, mask);
-	io_free_req(req);
 	return 0;
 }
 
@@ -1524,10 +1526,13 @@ static void io_sq_wq_submit_work(struct work_struct *work)
 					break;
 				cond_resched();
 			} while (1);
+
+			/* drop submission reference */
+			io_put_req(req);
 		}
 		if (ret) {
 			io_cqring_add_event(ctx, sqe->user_data, ret, 0);
-			io_free_req(req);
+			io_put_req(req);
 		}
 
 		/* async context always use a copy of the sqe */
@@ -1649,11 +1654,22 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 				INIT_WORK(&req->work, io_sq_wq_submit_work);
 				queue_work(ctx->sqo_wq, &req->work);
 			}
-			ret = 0;
+
+			/*
+			 * Queued up for async execution, worker will release
+			 * submit reference when the iocb is actually
+			 * submitted.
+			 */
+			return 0;
 		}
 	}
+
+	/* drop submission reference */
+	io_put_req(req);
+
+	/* and drop final reference, if we failed */
 	if (ret)
-		io_free_req(req);
+		io_put_req(req);
 
 	return ret;
 }
