Skip to content

Commit 04fc6c8

Browse files
isilence authored and axboe committed
io_uring: save ctx put/get for task_work submit
Do a little trick in io_ring_ctx_free() briefly taking uring_lock, that will wait for everyone currently holding it, so we can skip pinning ctx with ctx->refs for __io_req_task_submit(), which is executed and loses its refs/reqs while holding the lock. Signed-off-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent 921b905 commit 04fc6c8

File tree

1 file changed

+12
-5
lines changed

1 file changed

+12
-5
lines changed

fs/io_uring.c

Lines changed: 12 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -2336,6 +2336,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
23362336
{
23372337
struct io_ring_ctx *ctx = req->ctx;
23382338

2339+
/* ctx stays valid until unlock, even if we drop all ours ctx->refs */
23392340
mutex_lock(&ctx->uring_lock);
23402341
if (!ctx->sqo_dead && !io_sq_thread_acquire_mm_files(ctx, req))
23412342
__io_queue_sqe(req);
@@ -2347,22 +2348,20 @@ static void __io_req_task_submit(struct io_kiocb *req)
23472348
static void io_req_task_submit(struct callback_head *cb)
23482349
{
23492350
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2350-
struct io_ring_ctx *ctx = req->ctx;
23512351

23522352
__io_req_task_submit(req);
2353-
percpu_ref_put(&ctx->refs);
23542353
}
23552354

23562355
static void io_req_task_queue(struct io_kiocb *req)
23572356
{
23582357
int ret;
23592358

23602359
req->task_work.func = io_req_task_submit;
2361-
percpu_ref_get(&req->ctx->refs);
2362-
23632360
ret = io_req_task_work_add(req);
2364-
if (unlikely(ret))
2361+
if (unlikely(ret)) {
2362+
percpu_ref_get(&req->ctx->refs);
23652363
io_req_task_work_add_fallback(req, io_req_task_cancel);
2364+
}
23662365
}
23672366

23682367
static inline void io_queue_next(struct io_kiocb *req)
@@ -8707,6 +8706,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
87078706
{
87088707
struct io_submit_state *submit_state = &ctx->submit_state;
87098708

8709+
/*
8710+
* Some may use context even when all refs and requests have been put,
8711+
* and they are free to do so while still holding uring_lock, see
8712+
* __io_req_task_submit(). Wait for them to finish.
8713+
*/
8714+
mutex_lock(&ctx->uring_lock);
8715+
mutex_unlock(&ctx->uring_lock);
8716+
87108717
io_finish_async(ctx);
87118718
io_sqe_buffers_unregister(ctx);
87128719

0 commit comments

Comments (0)