
Commit 0e9ddb3

isilence authored and axboe committed
io_uring: cleanly cancel SQPOLL reqs across exec
For SQPOLL rings tctx_inflight() always returns zero, so it might skip doing full cancellation. It's fine because we jam all sqpoll submissions in any case and do go through files cancel for them, but not nice.

Do the intended full cancellation, by mimicking __io_uring_task_cancel() waiting but impersonating the SQPOLL task.

Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
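The drain loop this patch adds (see the diff below) samples the inflight counter, cancels what it can, then re-checks the count only after queueing itself on the waitqueue, so a completion racing with prepare_to_wait() cannot be missed. As a rough userspace analogue of that drain-to-zero pattern (a minimal sketch with hypothetical names; pthreads stand in for kernel waitqueues, and the mutex plays the lost-wakeup-avoidance role of prepare_to_wait()):

/* build: cc -pthread drain.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static long inflight;                   /* in-flight request count */

/* completion side: drop the count, wake the waiter at zero */
static void complete_one(void)
{
        pthread_mutex_lock(&lock);
        if (--inflight == 0)
                pthread_cond_signal(&drained);
        pthread_mutex_unlock(&lock);
}

/* cancel side: sleep until every in-flight request has finished */
static void wait_for_drain(void)
{
        pthread_mutex_lock(&lock);
        while (inflight != 0)           /* re-check under the lock */
                pthread_cond_wait(&drained, &lock);
        pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
        (void)arg;
        usleep(1000);                   /* simulate a request completing late */
        complete_one();
        return NULL;
}

int main(void)
{
        pthread_t t;

        inflight = 1;                   /* one request "submitted" */
        pthread_create(&t, NULL, worker, NULL);
        wait_for_drain();               /* returns only after complete_one() */
        pthread_join(t, NULL);
        puts("drained");
        return 0;
}

Here the condition re-check happens while holding the lock; the kernel loop gets the same guarantee by re-reading tctx_inflight() only after prepare_to_wait() has already put the task on tctx->wait.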
1 parent 257e84a · commit 0e9ddb3

1 file changed: +36 −21 lines changed

fs/io_uring.c

Lines changed: 36 additions & 21 deletions
@@ -9083,29 +9083,39 @@ void __io_uring_files_cancel(struct files_struct *files)
 
 static s64 tctx_inflight(struct io_uring_task *tctx)
 {
-        unsigned long index;
-        struct file *file;
-        s64 inflight;
-
-        inflight = percpu_counter_sum(&tctx->inflight);
-        if (!tctx->sqpoll)
-                return inflight;
+        return percpu_counter_sum(&tctx->inflight);
+}
 
-        /*
-         * If we have SQPOLL rings, then we need to iterate and find them, and
-         * add the pending count for those.
-         */
-        xa_for_each(&tctx->xa, index, file) {
-                struct io_ring_ctx *ctx = file->private_data;
+static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
+{
+        struct io_uring_task *tctx;
+        s64 inflight;
+        DEFINE_WAIT(wait);
 
-                if (ctx->flags & IORING_SETUP_SQPOLL) {
-                        struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
+        if (!ctx->sq_data)
+                return;
+        tctx = ctx->sq_data->thread->io_uring;
+        io_disable_sqo_submit(ctx);
 
-                        inflight += percpu_counter_sum(&__tctx->inflight);
-                }
-        }
+        atomic_inc(&tctx->in_idle);
+        do {
+                /* read completions before cancelations */
+                inflight = tctx_inflight(tctx);
+                if (!inflight)
+                        break;
+                io_uring_cancel_task_requests(ctx, NULL);
 
-        return inflight;
+                prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+                /*
+                 * If we've seen completions, retry without waiting. This
+                 * avoids a race where a completion comes in before we did
+                 * prepare_to_wait().
+                 */
+                if (inflight == tctx_inflight(tctx))
+                        schedule();
+                finish_wait(&tctx->wait, &wait);
+        } while (1);
+        atomic_dec(&tctx->in_idle);
 }
 
 /*
@@ -9122,8 +9132,13 @@ void __io_uring_task_cancel(void)
         atomic_inc(&tctx->in_idle);
 
         /* trigger io_disable_sqo_submit() */
-        if (tctx->sqpoll)
-                __io_uring_files_cancel(NULL);
+        if (tctx->sqpoll) {
+                struct file *file;
+                unsigned long index;
+
+                xa_for_each(&tctx->xa, index, file)
+                        io_uring_cancel_sqpoll(file->private_data);
+        }
 
         do {
                 /* read completions before cancelations */
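For context, tctx_inflight() now reduces to a single percpu_counter_sum() call. A minimal, hypothetical demo module (names invented here, not from the patch) showing how that counter API behaves:

/* illustrative kernel module; not part of the patch */
#include <linux/module.h>
#include <linux/percpu_counter.h>

static struct percpu_counter inflight;

static int __init inflight_demo_init(void)
{
        int ret = percpu_counter_init(&inflight, 0, GFP_KERNEL);

        if (ret)
                return ret;

        percpu_counter_add(&inflight, 1);       /* request issued */
        percpu_counter_add(&inflight, -1);      /* request completed */

        /*
         * percpu_counter_read() is a fast but approximate read;
         * percpu_counter_sum() folds in every CPU's local delta. The
         * cancellation path pays for the exact sum, since it must not
         * sleep forever on a count that only looks nonzero.
         */
        pr_info("inflight: %lld\n", percpu_counter_sum(&inflight));

        percpu_counter_destroy(&inflight);
        return 0;
}

static void __exit inflight_demo_exit(void)
{
}

module_init(inflight_demo_init);
module_exit(inflight_demo_exit);
MODULE_LICENSE("GPL");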
