
Commit d4d19c1

isilence authored and axboe committed
io_uring: decouple CQE filling from requests
Make __io_cqring_fill_event() agnostic of struct io_kiocb, pass all the data needed directly into it. Will be used to post rsrc removal completions, which don't have an associated request.

Signed-off-by: Pavel Begunkov <[email protected]>
Link: https://lore.kernel.org/r/c9b8da9e42772db2033547dfebe479dc972a0f2c.1619356238.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <[email protected]>
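For context, a minimal sketch (not part of this commit) of what the reworked signature enables: posting a CQE for something that has no struct io_kiocb, such as a registered-resource (rsrc) removal. The helper name io_post_plain_cqe below is hypothetical, and the actual rsrc removal completions arrive in a follow-up patch; only the ctx, user_data, res and cflags parameters introduced in this diff are assumed.

/*
 * Hypothetical sketch, not part of this commit: with the reworked
 * io_cqring_fill_event(), a completion can be posted without any
 * struct io_kiocb.  Take the completion lock, fill the CQE from raw
 * (user_data, res, cflags), commit the ring and wake waiters.
 */
static void io_post_plain_cqe(struct io_ring_ctx *ctx, u64 user_data,
			      long res, unsigned int cflags)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	io_cqring_fill_event(ctx, user_data, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}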
1 parent 44b31f2 commit d4d19c1

File tree: 1 file changed (+28, -27 lines)

fs/io_uring.c

Lines changed: 28 additions & 27 deletions
@@ -1025,7 +1025,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
 static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
 
-static bool io_cqring_fill_event(struct io_kiocb *req, long res, unsigned cflags);
+static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+				 long res, unsigned int cflags);
 static void io_put_req(struct io_kiocb *req);
 static void io_put_req_deferred(struct io_kiocb *req, int nr);
 static void io_dismantle_req(struct io_kiocb *req);
@@ -1266,7 +1267,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_cqring_fill_event(req, status, 0);
+		io_cqring_fill_event(req->ctx, req->user_data, status, 0);
 		io_put_req_deferred(req, 1);
 	}
 }
@@ -1500,10 +1501,9 @@ static inline void req_ref_get(struct io_kiocb *req)
 	atomic_inc(&req->refs);
 }
 
-static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
-				     unsigned int cflags)
+static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
+				     long res, unsigned int cflags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	struct io_overflow_cqe *ocqe;
 
 	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
@@ -1521,20 +1521,19 @@ static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
 		set_bit(0, &ctx->cq_check_overflow);
 		ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
 	}
-	ocqe->cqe.user_data = req->user_data;
+	ocqe->cqe.user_data = user_data;
 	ocqe->cqe.res = res;
 	ocqe->cqe.flags = cflags;
 	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
 	return true;
 }
 
-static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
-					  unsigned int cflags)
+static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+					  long res, unsigned int cflags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	struct io_uring_cqe *cqe;
 
-	trace_io_uring_complete(ctx, req->user_data, res, cflags);
+	trace_io_uring_complete(ctx, user_data, res, cflags);
 
 	/*
 	 * If we can't get a cq entry, userspace overflowed the
@@ -1543,19 +1542,19 @@ static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
 	 */
 	cqe = io_get_cqring(ctx);
 	if (likely(cqe)) {
-		WRITE_ONCE(cqe->user_data, req->user_data);
+		WRITE_ONCE(cqe->user_data, user_data);
 		WRITE_ONCE(cqe->res, res);
 		WRITE_ONCE(cqe->flags, cflags);
 		return true;
 	}
-	return io_cqring_event_overflow(req, res, cflags);
+	return io_cqring_event_overflow(ctx, user_data, res, cflags);
 }
 
 /* not as hot to bloat with inlining */
-static noinline bool io_cqring_fill_event(struct io_kiocb *req, long res,
-					  unsigned int cflags)
+static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+					  long res, unsigned int cflags)
 {
-	return __io_cqring_fill_event(req, res, cflags);
+	return __io_cqring_fill_event(ctx, user_data, res, cflags);
 }
 
 static void io_req_complete_post(struct io_kiocb *req, long res,
@@ -1565,7 +1564,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 	unsigned long flags;
 
 	spin_lock_irqsave(&ctx->completion_lock, flags);
-	__io_cqring_fill_event(req, res, cflags);
+	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
@@ -1776,7 +1775,8 @@ static bool io_kill_linked_timeout(struct io_kiocb *req)
 		io_remove_next_linked(req);
 		link->timeout.head = NULL;
 		if (hrtimer_try_to_cancel(&io->timer) != -1) {
-			io_cqring_fill_event(link, -ECANCELED, 0);
+			io_cqring_fill_event(link->ctx, link->user_data,
+					     -ECANCELED, 0);
 			io_put_req_deferred(link, 1);
 			return true;
 		}
@@ -1795,7 +1795,7 @@ static void io_fail_links(struct io_kiocb *req)
 		link->link = NULL;
 
 		trace_io_uring_fail_link(req, link);
-		io_cqring_fill_event(link, -ECANCELED, 0);
+		io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
 		io_put_req_deferred(link, 2);
 		link = nxt;
 	}
@@ -2116,7 +2116,8 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
 	spin_lock_irq(&ctx->completion_lock);
 	for (i = 0; i < nr; i++) {
 		req = cs->reqs[i];
-		__io_cqring_fill_event(req, req->result, req->compl.cflags);
+		__io_cqring_fill_event(ctx, req->user_data, req->result,
+					req->compl.cflags);
 	}
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
@@ -2256,7 +2257,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		if (req->flags & REQ_F_BUFFER_SELECTED)
 			cflags = io_put_rw_kbuf(req);
 
-		__io_cqring_fill_event(req, req->result, cflags);
+		__io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
 		(*nr_events)++;
 
 		if (req_ref_put_and_test(req))
@@ -4875,7 +4876,7 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
 	}
 	if (req->poll.events & EPOLLONESHOT)
 		flags = 0;
-	if (!io_cqring_fill_event(req, error, flags)) {
+	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
 		io_poll_remove_waitqs(req);
 		req->poll.done = true;
 		flags = 0;
@@ -5203,7 +5204,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 
 	do_complete = io_poll_remove_waitqs(req);
 	if (do_complete) {
-		io_cqring_fill_event(req, -ECANCELED, 0);
+		io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
 		io_commit_cqring(req->ctx);
 		req_set_fail_links(req);
 		io_put_req_deferred(req, 1);
@@ -5455,7 +5456,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	atomic_set(&req->ctx->cq_timeouts,
 		atomic_read(&req->ctx->cq_timeouts) + 1);
 
-	io_cqring_fill_event(req, -ETIME, 0);
+	io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
@@ -5497,7 +5498,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 		return PTR_ERR(req);
 
 	req_set_fail_links(req);
-	io_cqring_fill_event(req, -ECANCELED, 0);
+	io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
 	io_put_req_deferred(req, 1);
 	return 0;
 }
@@ -5570,7 +5571,7 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 		ret = io_timeout_update(ctx, tr->addr, &tr->ts,
 					io_translate_timeout_mode(tr->flags));
 
-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
 	io_cqring_ev_posted(ctx);
@@ -5722,7 +5723,7 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
 done:
 	if (!ret)
 		ret = success_ret;
-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	io_cqring_ev_posted(ctx);
@@ -5779,7 +5780,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 
 	spin_lock_irq(&ctx->completion_lock);
 done:
-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
 	io_cqring_ev_posted(ctx);
