
Commit 5c8b0b5

io_uring: have submission side sqe errors post a cqe
Currently we only post a cqe if we get an error OUTSIDE of submission. For submission, we return the error directly through io_uring_enter(). This is a bit awkward for applications, and it makes more sense to always post a cqe with an error, if the error happens on behalf of an sqe.

This changes submission behavior a bit: io_uring_enter() returns -ERROR for an error, and > 0 for the number of sqes submitted. Before this change, if you wanted to submit 8 entries and had an error on the 5th entry, io_uring_enter() would return 4 (the number of entries successfully submitted) and rewind the sqring. The application would then have to peek at the sqring, figure out what was wrong with the head sqe, and then skip it itself. With this change, we'll return 5, since we did consume 5 sqes, and the last sqe (the one with the error) will result in a cqe being posted with the error.

This makes the logic easier to handle in the application, and it cleans up the submission part.

Suggested-by: Stefan Bühler <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 6297728 commit 5c8b0b5
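Under the new contract the submission side needs no rewind handling at all: io_uring_enter() simply reports how many sqes the kernel consumed, and a bad sqe surfaces later as a completion. Below is a minimal sketch of a batch submit using liburing wrappers (io_uring_submit() invokes io_uring_enter() internally); the nop opcode and the submit_batch() helper are illustrative assumptions, not part of this commit:

#include <liburing.h>

/*
 * Queue 'n' no-op requests and push them to the kernel in one
 * io_uring_enter() call. After this commit, a failing sqe no longer
 * truncates the return value or rewinds the sqring; it is consumed
 * and its error is reported through the completion ring instead.
 */
static int submit_batch(struct io_uring *ring, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		if (!sqe)
			break;		/* sq ring is full */
		io_uring_prep_nop(sqe);
		sqe->user_data = i;	/* lets an error cqe be matched up */
	}

	/* number of sqes consumed, or -errno if the call itself failed */
	return io_uring_submit(ring);
}

Since every consumed sqe now yields exactly one cqe, the caller can size its completion wait from the submit count alone, which is what allows the min_complete adjustment in the diff below to be deleted.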

1 file changed, 6 insertions(+), 28 deletions(-)

fs/io_uring.c

@@ -1801,14 +1801,6 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
 	}
 }
 
-/*
- * Undo last io_get_sqring()
- */
-static void io_drop_sqring(struct io_ring_ctx *ctx)
-{
-	ctx->cached_sq_head--;
-}
-
 /*
  * Fetch an sqe, if one is available. Note that s->sqe will point to memory
  * that is mapped by userspace. This means that care needs to be taken to
@@ -2018,7 +2010,7 @@ static int io_sq_thread(void *data)
 static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 {
 	struct io_submit_state state, *statep = NULL;
-	int i, ret = 0, submit = 0;
+	int i, submit = 0;
 
 	if (to_submit > IO_PLUG_THRESHOLD) {
 		io_submit_state_start(&state, ctx, to_submit);
@@ -2027,28 +2019,26 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 
 	for (i = 0; i < to_submit; i++) {
 		struct sqe_submit s;
+		int ret;
 
 		if (!io_get_sqring(ctx, &s))
 			break;
 
 		s.has_user = true;
 		s.needs_lock = false;
 		s.needs_fixed_file = false;
+		submit++;
 
 		ret = io_submit_sqe(ctx, &s, statep);
-		if (ret) {
-			io_drop_sqring(ctx);
-			break;
-		}
-
-		submit++;
+		if (ret)
+			io_cqring_add_event(ctx, s.sqe->user_data, ret, 0);
 	}
 	io_commit_sqring(ctx);
 
 	if (statep)
 		io_submit_state_end(statep);
 
-	return submit ? submit : ret;
+	return submit;
 }
 
 static unsigned io_cqring_events(struct io_cq_ring *ring)
@@ -2779,24 +2769,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		mutex_lock(&ctx->uring_lock);
 		submitted = io_ring_submit(ctx, to_submit);
 		mutex_unlock(&ctx->uring_lock);
-
-		if (submitted < 0)
-			goto out_ctx;
 	}
 	if (flags & IORING_ENTER_GETEVENTS) {
 		unsigned nr_events = 0;
 
 		min_complete = min(min_complete, ctx->cq_entries);
 
-		/*
-		 * The application could have included the 'to_submit' count
-		 * in how many events it wanted to wait for. If we failed to
-		 * submit the desired count, we may need to adjust the number
-		 * of events to poll/wait for.
-		 */
-		if (submitted < to_submit)
-			min_complete = min_t(unsigned, submitted, min_complete);
-
 		if (ctx->flags & IORING_SETUP_IOPOLL) {
 			mutex_lock(&ctx->uring_lock);
 			ret = io_iopoll_check(ctx, &nr_events, min_complete);
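With the min_complete clamp gone, the only place a per-sqe submission error is visible is the cq ring, where cqe->res carries the negative errno, exactly as for an error that happens during the I/O itself. A sketch of the reaping side under the same liburing-style assumptions (reap_one() is a hypothetical helper name):

#include <liburing.h>
#include <stdio.h>
#include <string.h>

/*
 * Wait for and consume a single completion. A negative res is the
 * errno the request failed with, whether the failure happened at
 * submission time (this commit) or later during the I/O.
 */
static int reap_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe(ring, &cqe);

	if (ret < 0)
		return ret;
	if (cqe->res < 0)
		fprintf(stderr, "sqe %llu failed: %s\n",
			(unsigned long long) cqe->user_data,
			strerror(-cqe->res));
	io_uring_cqe_seen(ring, cqe);
	return 0;
}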
