
Commit eae071c

isilence authored and axboe committed
io_uring: prepare fixed rw for dynamic buffers
With dynamic buffer updates, registered buffers in the table may change at any moment. First of all we want to prevent future races between updating and importing (i.e. io_import_fixed()), where the latter one may happen without uring_lock held, e.g. from io-wq. Save the first loaded io_mapped_ubuf buffer and reuse.

Signed-off-by: Pavel Begunkov <[email protected]>
Link: https://lore.kernel.org/r/21a2302d07766ae956640b6f753292c45200fe8f.1619356238.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <[email protected]>
1 parent 41edf1a commit eae071c
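For context, here is a simplified, self-contained sketch (not the kernel code; the struct and function names below are illustrative stand-ins) of the pattern the commit message describes: resolve the table entry once, cache the pointer on the request, and reuse it for every later import, so a concurrent table update cannot change which buffer an in-flight request sees.

/* Illustrative sketch only: stand-ins for io_ring_ctx, io_kiocb and io_mapped_ubuf. */
#include <stddef.h>

struct mapped_buf {			/* stands in for io_mapped_ubuf */
	void	*base;
	size_t	len;
};

struct request {			/* stands in for io_kiocb */
	unsigned		buf_index;
	struct mapped_buf	*imu;	/* cached on first import, reused afterwards */
};

struct context {			/* stands in for io_ring_ctx */
	struct mapped_buf	**user_bufs;
	unsigned		nr_user_bufs;
};

/* Look the buffer up once; every later call returns the cached pointer. */
static struct mapped_buf *import_fixed(struct context *ctx, struct request *req)
{
	if (!req->imu) {
		if (req->buf_index >= ctx->nr_user_bufs)
			return NULL;	/* the kernel returns -EFAULT here */
		req->imu = ctx->user_bufs[req->buf_index];
	}
	return req->imu;
}

The real io_import_fixed() additionally uses array_index_nospec() and READ_ONCE() when loading the table entry, as the diff below shows.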


fs/io_uring.c

Lines changed: 29 additions & 10 deletions
@@ -839,6 +839,8 @@ struct io_kiocb {
 	struct hlist_node		hash_node;
 	struct async_poll		*apoll;
 	struct io_wq_work		work;
+	/* store used ubuf, so we can prevent reloading */
+	struct io_mapped_ubuf		*imu;
 };
 
 struct io_tctx_node {
@@ -2683,6 +2685,12 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		kiocb->ki_complete = io_complete_rw;
 	}
 
+	if (req->opcode == IORING_OP_READ_FIXED ||
+	    req->opcode == IORING_OP_WRITE_FIXED) {
+		req->imu = NULL;
+		io_req_set_rsrc_node(req);
+	}
+
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
 	req->buf_index = READ_ONCE(sqe->buf_index);
@@ -2748,21 +2756,13 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 	}
 }
 
-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
+			     struct io_mapped_ubuf *imu)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	size_t len = req->rw.len;
-	struct io_mapped_ubuf *imu;
-	u16 index, buf_index = req->buf_index;
 	u64 buf_end, buf_addr = req->rw.addr;
 	size_t offset;
 
-	if (unlikely(buf_index >= ctx->nr_user_bufs))
-		return -EFAULT;
-	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
-	imu = ctx->user_bufs[index];
-	buf_addr = req->rw.addr;
-
 	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
 		return -EFAULT;
 	/* not inside the mapped region */
@@ -2814,6 +2814,22 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
 	return 0;
 }
 
+static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_mapped_ubuf *imu = req->imu;
+	u16 index, buf_index = req->buf_index;
+
+	if (likely(!imu)) {
+		if (unlikely(buf_index >= ctx->nr_user_bufs))
+			return -EFAULT;
+		index = array_index_nospec(buf_index, ctx->nr_user_bufs);
+		imu = READ_ONCE(ctx->user_bufs[index]);
+		req->imu = imu;
+	}
+	return __io_import_fixed(req, rw, iter, imu);
+}
+
 static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
 {
 	if (needs_lock)
@@ -9506,6 +9522,9 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	ret = io_sq_offload_create(ctx, p);
 	if (ret)
 		goto err;
+	/* always set a rsrc node */
+	io_rsrc_node_switch_start(ctx);
+	io_rsrc_node_switch(ctx, NULL);
 
 	memset(&p->sq_off, 0, sizeof(p->sq_off));
 	p->sq_off.head = offsetof(struct io_rings, sq.head);
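For reference, a minimal userspace sketch using liburing (assuming liburing is installed; the file path is arbitrary and error handling is mostly omitted) showing the fixed-buffer path this code serves: the last argument to io_uring_prep_read_fixed() is the buf_index that the kernel's io_import_fixed() resolves through ctx->user_bufs.

/* Fixed-buffer read via liburing; illustrative only. */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov = { .iov_len = 4096 };
	int fd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Register one buffer; it becomes entry 0 of the kernel's buffer table. */
	if (posix_memalign(&iov.iov_base, 4096, iov.iov_len) ||
	    io_uring_register_buffers(&ring, &iov, 1) < 0)
		return 1;

	fd = open("/etc/hostname", O_RDONLY);

	/* IORING_OP_READ_FIXED: the final 0 is buf_index into the registered table. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	printf("read returned %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}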

0 commit comments
