Commit 05b538c

io_uring: fix not locked access to fixed buf table
We can look inside the fixed buffer table only while holding ->uring_lock; however, in some cases we don't do the right async prep for IORING_OP_{WRITE,READ}_FIXED, ending up with a NULL req->imu and forcing an io-wq worker to try to resolve the fixed buffer without proper locking.

Move req->imu setup into the early req init path, i.e. io_prep_rw(), which is called unconditionally for rw requests and under uring_lock.

Fixes: 634d00d ("io_uring: add full-fledged dynamic buffers support")
Signed-off-by: Pavel Begunkov <[email protected]>
1 parent: d11d31f · commit: 05b538c
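
For context (not part of the commit): a minimal liburing sketch of the IORING_OP_READ_FIXED path that this prep change affects. The file path and buffer size are arbitrary, and error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct iovec iov;
        char *buf;
        int fd;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* Register one fixed buffer: the kernel pins it and stores it in
         * the per-ring table (ctx->user_bufs) that the commit refers to. */
        buf = malloc(4096);
        iov.iov_base = buf;
        iov.iov_len = 4096;
        if (io_uring_register_buffers(&ring, &iov, 1) < 0)
                return 1;

        fd = open("/etc/hostname", O_RDONLY);
        if (fd < 0)
                return 1;

        /* buf_index 0 selects the registered buffer; this submits an
         * IORING_OP_READ_FIXED SQE, one of the opcodes whose prep changed. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0, 0);
        io_uring_submit(&ring);

        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read %d bytes\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
}

Per the commit message, before this fix such a request that got punted to io-wq could reach the buffer table with a NULL req->imu and resolve it without ->uring_lock; after it, req->imu is already set during prep, which runs under uring_lock.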

File tree: 1 file changed (+17, -17 lines)

fs/io_uring.c

@@ -3636,6 +3636,20 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
         int ret;
 
         kiocb->ki_pos = READ_ONCE(sqe->off);
+        /* used for fixed read/write too - just read unconditionally */
+        req->buf_index = READ_ONCE(sqe->buf_index);
+
+        if (req->opcode == IORING_OP_READ_FIXED ||
+            req->opcode == IORING_OP_WRITE_FIXED) {
+                struct io_ring_ctx *ctx = req->ctx;
+                u16 index;
+
+                if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+                        return -EFAULT;
+                index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+                req->imu = ctx->user_bufs[index];
+                io_req_set_rsrc_node(req, ctx, 0);
+        }
 
         ioprio = READ_ONCE(sqe->ioprio);
         if (ioprio) {
@@ -3648,12 +3662,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                 kiocb->ki_ioprio = get_current_ioprio();
         }
 
-        req->imu = NULL;
         req->rw.addr = READ_ONCE(sqe->addr);
         req->rw.len = READ_ONCE(sqe->len);
         req->rw.flags = READ_ONCE(sqe->rw_flags);
-        /* used for fixed read/write too - just read unconditionally */
-        req->buf_index = READ_ONCE(sqe->buf_index);
         return 0;
 }
 
@@ -3785,20 +3796,9 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
                            unsigned int issue_flags)
 {
-        struct io_mapped_ubuf *imu = req->imu;
-        u16 index, buf_index = req->buf_index;
-
-        if (likely(!imu)) {
-                struct io_ring_ctx *ctx = req->ctx;
-
-                if (unlikely(buf_index >= ctx->nr_user_bufs))
-                        return -EFAULT;
-                io_req_set_rsrc_node(req, ctx, issue_flags);
-                index = array_index_nospec(buf_index, ctx->nr_user_bufs);
-                imu = READ_ONCE(ctx->user_bufs[index]);
-                req->imu = imu;
-        }
-        return __io_import_fixed(req, rw, iter, imu);
+        if (WARN_ON_ONCE(!req->imu))
+                return -EFAULT;
+        return __io_import_fixed(req, rw, iter, req->imu);
 }
 
 static int io_buffer_add_list(struct io_ring_ctx *ctx,
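
As an aside, here is a rough userspace sketch (not the kernel implementation, which also defeats compiler optimizations and has per-arch variants) of the array_index_nospec() idiom used in the new io_prep_rw() hunk: after the ordinary bounds check, the index is additionally clamped with a branch-free mask so a mispredicted branch cannot be used to speculatively read past the end of the table.

#include <stdint.h>
#include <stdio.h>

/* All-ones when index < size, all-zeroes otherwise, computed without a
 * conditional branch (a simplified take on the kernel's generic mask). */
static inline uint64_t index_mask_nospec(uint64_t index, uint64_t size)
{
        return ~(uint64_t)((int64_t)(index | (size - 1 - index)) >> 63);
}

int main(void)
{
        uint64_t table[4] = { 10, 20, 30, 40 };
        uint64_t idx = 2;                       /* imagine this came from userspace */

        if (idx >= 4)                           /* normal bounds check */
                return 1;
        idx &= index_mask_nospec(idx, 4);       /* speculation-safe clamp */
        printf("%llu\n", (unsigned long long)table[idx]);
        return 0;
}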
