Commit b63534c

io_uring: re-issue block requests that failed because of resources
Mark the plug with nowait == true, which will cause requests to avoid
blocking on request allocation. If they do, we catch them and reissue
them from a task_work based handler.

Normally we can catch -EAGAIN directly, but the hard case is for split
requests. As an example, the application issues a 512KB request. The
block core will split this into 128KB if that's the max size for the
device. The first request issues just fine, but we run into -EAGAIN for
some latter splits for the same request. As the bio is split, we don't
get to see the -EAGAIN until one of the actual reads complete, and hence
we cannot handle it inline as part of submission. This does potentially
cause re-reads of parts of the range, as the whole request is reissued.
There's currently no better way to handle this.

Signed-off-by: Jens Axboe <[email protected]>
1 parent 4503b76 commit b63534c
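
As background for the scenario in the commit message, the sketch below shows what the application side of such a request can look like from userspace. It is illustrative only and not part of this commit; the device path, the 4KB alignment, and the 512KB read size are assumptions chosen so that the block layer is likely to split the request on a device with a smaller max transfer size.

/*
 * Hypothetical reproducer for the split-request case described above:
 * one 512KB O_DIRECT read submitted through io_uring. Device path,
 * alignment, and size are assumptions for illustration.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <liburing.h>

#define READ_SIZE	(512 * 1024)	/* may be split into smaller bios */

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd, ret;

	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);	/* assumed device */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (posix_memalign(&buf, 4096, READ_SIZE))
		return 1;
	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, READ_SIZE, 0);
	io_uring_submit(&ring);

	/*
	 * Per the commit message: before this change, a latter split of the
	 * request could hit -EAGAIN after submission had already returned,
	 * surfacing here as a failed completion. With the change, the kernel
	 * reissues the whole request from task_work, so the completion below
	 * reflects the full read (possibly re-reading parts of the range).
	 */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("read completed, res = %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	free(buf);
	return 0;
}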

File tree

1 file changed: +124 -24


fs/io_uring.c

Lines changed: 124 additions & 24 deletions
@@ -900,6 +900,13 @@ static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
 static void __io_queue_sqe(struct io_kiocb *req,
 			   const struct io_uring_sqe *sqe);
 
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+			       struct iovec **iovec, struct iov_iter *iter,
+			       bool needs_lock);
+static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
+			     struct iovec *iovec, struct iovec *fast_iov,
+			     struct iov_iter *iter);
+
 static struct kmem_cache *req_cachep;
 
 static const struct file_operations io_uring_fops;
@@ -1978,12 +1985,115 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
 		__io_cqring_add_event(req, res, cflags);
 }
 
+static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
+{
+	struct mm_struct *mm = current->mm;
+
+	if (mm) {
+		kthread_unuse_mm(mm);
+		mmput(mm);
+	}
+}
+
+static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
+				   struct io_kiocb *req)
+{
+	if (io_op_defs[req->opcode].needs_mm && !current->mm) {
+		if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
+			return -EFAULT;
+		kthread_use_mm(ctx->sqo_mm);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_BLOCK
+static bool io_resubmit_prep(struct io_kiocb *req, int error)
+{
+	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+	ssize_t ret = -ECANCELED;
+	struct iov_iter iter;
+	int rw;
+
+	if (error) {
+		ret = error;
+		goto end_req;
+	}
+
+	switch (req->opcode) {
+	case IORING_OP_READV:
+	case IORING_OP_READ_FIXED:
+	case IORING_OP_READ:
+		rw = READ;
+		break;
+	case IORING_OP_WRITEV:
+	case IORING_OP_WRITE_FIXED:
+	case IORING_OP_WRITE:
+		rw = WRITE;
+		break;
+	default:
+		printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
+				req->opcode);
+		goto end_req;
+	}
+
+	ret = io_import_iovec(rw, req, &iovec, &iter, false);
+	if (ret < 0)
+		goto end_req;
+	ret = io_setup_async_rw(req, ret, iovec, inline_vecs, &iter);
+	if (!ret)
+		return true;
+	kfree(iovec);
+end_req:
+	io_cqring_add_event(req, ret);
+	req_set_fail_links(req);
+	io_put_req(req);
+	return false;
+}
+
+static void io_rw_resubmit(struct callback_head *cb)
+{
+	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
+	int err;
+
+	__set_current_state(TASK_RUNNING);
+
+	err = io_sq_thread_acquire_mm(ctx, req);
+
+	if (io_resubmit_prep(req, err)) {
+		refcount_inc(&req->refs);
+		io_queue_async_work(req);
+	}
+}
+#endif
+
+static bool io_rw_reissue(struct io_kiocb *req, long res)
+{
+#ifdef CONFIG_BLOCK
+	struct task_struct *tsk;
+	int ret;
+
+	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
+		return false;
+
+	tsk = req->task;
+	init_task_work(&req->task_work, io_rw_resubmit);
+	ret = task_work_add(tsk, &req->task_work, true);
+	if (!ret)
+		return true;
+#endif
+	return false;
+}
+
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
-	io_complete_rw_common(kiocb, res);
-	io_put_req(req);
+	if (!io_rw_reissue(req, res)) {
+		io_complete_rw_common(kiocb, res);
+		io_put_req(req);
+	}
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -2169,6 +2279,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (kiocb->ki_flags & IOCB_NOWAIT)
 		req->flags |= REQ_F_NOWAIT;
 
+	if (kiocb->ki_flags & IOCB_DIRECT)
+		io_get_req_task(req);
+
 	if (force_nonblock)
 		kiocb->ki_flags |= IOCB_NOWAIT;
 
@@ -2668,6 +2781,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 	iov_count = iov_iter_count(&iter);
 	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
 	if (!ret) {
+		unsigned long nr_segs = iter.nr_segs;
 		ssize_t ret2 = 0;
 
 		if (req->file->f_op->read_iter)
@@ -2679,6 +2793,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 		if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
 			kiocb_done(kiocb, ret2);
 		} else {
+			iter.count = iov_count;
+			iter.nr_segs = nr_segs;
 copy_iov:
 			ret = io_setup_async_rw(req, io_size, iovec,
 						inline_vecs, &iter);
@@ -2765,6 +2881,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 	iov_count = iov_iter_count(&iter);
 	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
 	if (!ret) {
+		unsigned long nr_segs = iter.nr_segs;
 		ssize_t ret2;
 
 		/*
@@ -2802,6 +2919,8 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 		if (!force_nonblock || ret2 != -EAGAIN) {
 			kiocb_done(kiocb, ret2);
 		} else {
+			iter.count = iov_count;
+			iter.nr_segs = nr_segs;
 copy_iov:
 			ret = io_setup_async_rw(req, io_size, iovec,
 						inline_vecs, &iter);
@@ -4282,28 +4401,6 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 	__io_queue_proc(&pt->req->apoll->poll, pt, head);
 }
 
-static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
-{
-	struct mm_struct *mm = current->mm;
-
-	if (mm) {
-		kthread_unuse_mm(mm);
-		mmput(mm);
-	}
-}
-
-static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
-				   struct io_kiocb *req)
-{
-	if (io_op_defs[req->opcode].needs_mm && !current->mm) {
-		if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
-			return -EFAULT;
-		kthread_use_mm(ctx->sqo_mm);
-	}
-
-	return 0;
-}
-
 static void io_async_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -5814,6 +5911,9 @@ static void io_submit_state_start(struct io_submit_state *state,
 				  unsigned int max_ios)
 {
 	blk_start_plug(&state->plug);
+#ifdef CONFIG_BLOCK
+	state->plug.nowait = true;
+#endif
 	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;