
Commit 2f9c951

io_uring/net: support bundles for recv
If IORING_OP_RECV is used with provided buffers, the caller may also set
IORING_RECVSEND_BUNDLE to turn it into a multi-buffer recv. This grabs
buffers available and receives into them, posting a single completion for
all of it.

This can be used with multishot receive as well, or without it.

Now that both send and receive support bundles, add a feature flag for it
as well. If IORING_FEAT_RECVSEND_BUNDLE is set after registering the ring,
then the kernel supports bundles for recv and send.

Signed-off-by: Jens Axboe <[email protected]>
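For illustration only, not part of this commit: a minimal userspace sketch of how a bundled receive might be issued with liburing. It assumes a provided-buffer ring has already been registered for buffer group 0, and that no dedicated liburing helper is used for the new flag, so IORING_RECVSEND_BUNDLE is set directly in the SQE's ioprio field, which is where the kernel reads the recv/send flags from (sr->flags = READ_ONCE(sqe->ioprio) in io_uring/net.c below).

#include <liburing.h>

/* Hypothetical sketch: queue one IORING_OP_RECV that may complete into
 * several provided buffers at once. Assumes a buffer ring is registered
 * for buffer group 0. */
static int queue_bundle_recv(struct io_uring *ring, int sockfd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -1;
        /* no buffer or length given: buffers come from the provided buffer group */
        io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
        sqe->flags |= IOSQE_BUFFER_SELECT;      /* use provided buffers */
        sqe->buf_group = 0;                     /* assumed buffer group ID */
        sqe->ioprio |= IORING_RECVSEND_BUNDLE;  /* allow a multi-buffer completion */
        return io_uring_submit(ring);
}

On completion, a single CQE covers all the received data; the starting buffer ID is carried in cqe->flags as with any provided-buffer completion, and the consumed buffers are contiguous from that ID (see the uapi comment in the first diff below).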
1 parent a05d1f6 commit 2f9c951

File tree: 3 files changed, +105 -29 lines

include/uapi/linux/io_uring.h
io_uring/io_uring.c
io_uring/net.c

include/uapi/linux/io_uring.h

Lines changed: 8 additions & 7 deletions
@@ -352,13 +352,13 @@ enum io_uring_op {
  *    IORING_NOTIF_USAGE_ZC_COPIED if data was copied
  *    (at least partially).
  *
- * IORING_RECVSEND_BUNDLE       Used with IOSQE_BUFFER_SELECT. If set, send will
- *                              grab as many buffers from the buffer group ID
- *                              given and send them all. The completion result
- *                              will be the number of buffers send, with the
- *                              starting buffer ID in cqe->flags as per usual
- *                              for provided buffer usage. The buffers will be
- *                              contigious from the starting buffer ID.
+ * IORING_RECVSEND_BUNDLE       Used with IOSQE_BUFFER_SELECT. If set, send or
+ *                              recv will grab as many buffers from the buffer
+ *                              group ID given and send them all. The completion
+ *                              result will be the number of buffers send, with
+ *                              the starting buffer ID in cqe->flags as per
+ *                              usual for provided buffer usage. The buffers
+ *                              will be contigious from the starting buffer ID.
  */
 #define IORING_RECVSEND_POLL_FIRST      (1U << 0)
 #define IORING_RECV_MULTISHOT           (1U << 1)
@@ -529,6 +529,7 @@ struct io_uring_params {
 #define IORING_FEAT_CQE_SKIP            (1U << 11)
 #define IORING_FEAT_LINKED_FILE         (1U << 12)
 #define IORING_FEAT_REG_REG_RING        (1U << 13)
+#define IORING_FEAT_RECVSEND_BUNDLE     (1U << 14)
 
 /*
  * io_uring_register(2) opcodes and arguments
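Per the comment above, a bundle completion reports the starting buffer ID in cqe->flags as with any provided-buffer completion, and the consumed buffers are contiguous from that ID. A hedged sketch of reaping such a completion follows; it assumes fixed-size provided buffers of buf_len bytes each, and it assumes cqe->res carries the total byte count of the receive, as with a regular recv (the io_uring/net.c hunks below use io_bundle_nbufs(kmsg, *ret) to turn that result back into a buffer count when releasing buffers on the kernel side).

#include <liburing.h>

/* Illustrative only: walk the buffers consumed by one bundle completion.
 * buf_len is the assumed fixed size of each provided buffer. */
static void reap_bundle_cqe(struct io_uring_cqe *cqe, unsigned int buf_len)
{
        unsigned int first_bid, nr_bufs, i;

        if (cqe->res <= 0 || !(cqe->flags & IORING_CQE_F_BUFFER))
                return;
        first_bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
        /* last buffer may be only partially filled */
        nr_bufs = (cqe->res + buf_len - 1) / buf_len;
        for (i = 0; i < nr_bufs; i++) {
                unsigned int bid = first_bid + i;       /* buffer IDs are contiguous */
                /* hand buffer 'bid' to the application, then recycle it */
                (void)bid;
        }
}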

io_uring/io_uring.c

Lines changed: 2 additions & 1 deletion
@@ -3583,7 +3583,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
                        IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
                        IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
                        IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
-                       IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING;
+                       IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
+                       IORING_FEAT_RECVSEND_BUNDLE;
 
        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
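Since the new feature bit is now advertised in io_uring_params.features at ring creation, userspace can probe for it at setup time. A minimal sketch, assuming liburing's io_uring_queue_init_params():

#include <liburing.h>

/* Illustrative only: returns non-zero if the running kernel advertises
 * bundle support for send and recv. Leaves the ring initialized on success. */
static int ring_supports_bundles(struct io_uring *ring, unsigned int entries)
{
        struct io_uring_params p = { };

        if (io_uring_queue_init_params(entries, ring, &p) < 0)
                return 0;
        return (p.features & IORING_FEAT_RECVSEND_BUNDLE) != 0;
}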

io_uring/net.c

Lines changed: 95 additions & 21 deletions
@@ -747,7 +747,8 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
        return ret;
 }
 
-#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
+#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
+                       IORING_RECVSEND_BUNDLE)
 
 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
@@ -761,21 +762,14 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->flags = READ_ONCE(sqe->ioprio);
-       if (sr->flags & ~(RECVMSG_FLAGS))
+       if (sr->flags & ~RECVMSG_FLAGS)
                return -EINVAL;
        sr->msg_flags = READ_ONCE(sqe->msg_flags);
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        if (sr->msg_flags & MSG_ERRQUEUE)
                req->flags |= REQ_F_CLEAR_POLLIN;
-       if (sr->flags & IORING_RECV_MULTISHOT) {
-               if (!(req->flags & REQ_F_BUFFER_SELECT))
-                       return -EINVAL;
-               if (sr->msg_flags & MSG_WAITALL)
-                       return -EINVAL;
-               if (req->opcode == IORING_OP_RECV && sr->len)
-                       return -EINVAL;
-               req->flags |= REQ_F_APOLL_MULTISHOT;
+       if (req->flags & REQ_F_BUFFER_SELECT) {
                /*
                 * Store the buffer group for this multishot receive separately,
                 * as if we end up doing an io-wq based issue that selects a
@@ -785,6 +779,20 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                 * restore it.
                 */
                sr->buf_group = req->buf_index;
+               req->buf_list = NULL;
+       }
+       if (sr->flags & IORING_RECV_MULTISHOT) {
+               if (!(req->flags & REQ_F_BUFFER_SELECT))
+                       return -EINVAL;
+               if (sr->msg_flags & MSG_WAITALL)
+                       return -EINVAL;
+               if (req->opcode == IORING_OP_RECV && sr->len)
+                       return -EINVAL;
+               req->flags |= REQ_F_APOLL_MULTISHOT;
+       }
+       if (sr->flags & IORING_RECVSEND_BUNDLE) {
+               if (req->opcode == IORING_OP_RECVMSG)
+                       return -EINVAL;
        }
 
 #ifdef CONFIG_COMPAT
@@ -805,19 +813,28 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
                                  struct io_async_msghdr *kmsg,
                                  bool mshot_finished, unsigned issue_flags)
 {
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        unsigned int cflags;
 
-       cflags = io_put_kbuf(req, issue_flags);
+       if (sr->flags & IORING_RECVSEND_BUNDLE)
+               cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+                                     issue_flags);
+       else
+               cflags = io_put_kbuf(req, issue_flags);
+
        if (kmsg->msg.msg_inq > 0)
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
+       /* bundle with no more immediate buffers, we're done */
+       if (sr->flags & IORING_RECVSEND_BUNDLE && req->flags & REQ_F_BL_EMPTY)
+               goto finish;
+
        /*
         * Fill CQE for this receive and see if we should keep trying to
         * receive from this socket.
         */
        if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
            io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
-               struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
                int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
 
                io_mshot_prep_retry(req, kmsg);
@@ -837,6 +854,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
        }
 
        /* Finish the request / stop multishot. */
+finish:
        io_req_set_res(req, *ret, cflags);
 
        if (issue_flags & IO_URING_F_MULTISHOT)
@@ -1020,6 +1038,69 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        return ret;
 }
 
+static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
+                             size_t *len, unsigned int issue_flags)
+{
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+       int ret;
+
+       /*
+        * If the ring isn't locked, then don't use the peek interface
+        * to grab multiple buffers as we will lock/unlock between
+        * this selection and posting the buffers.
+        */
+       if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+           sr->flags & IORING_RECVSEND_BUNDLE) {
+               struct buf_sel_arg arg = {
+                       .iovs = &kmsg->fast_iov,
+                       .nr_iovs = 1,
+                       .mode = KBUF_MODE_EXPAND,
+               };
+
+               if (kmsg->free_iov) {
+                       arg.nr_iovs = kmsg->free_iov_nr;
+                       arg.iovs = kmsg->free_iov;
+                       arg.mode |= KBUF_MODE_FREE;
+               }
+
+               if (kmsg->msg.msg_inq > 0)
+                       arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
+
+               ret = io_buffers_peek(req, &arg);
+               if (unlikely(ret < 0))
+                       return ret;
+
+               /* special case 1 vec, can be a fast path */
+               if (ret == 1) {
+                       sr->buf = arg.iovs[0].iov_base;
+                       sr->len = arg.iovs[0].iov_len;
+                       goto map_ubuf;
+               }
+               iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
+                               arg.out_len);
+               if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
+                       kmsg->free_iov_nr = ret;
+                       kmsg->free_iov = arg.iovs;
+               }
+       } else {
+               void __user *buf;
+
+               *len = sr->len;
+               buf = io_buffer_select(req, len, issue_flags);
+               if (!buf)
+                       return -ENOBUFS;
+               sr->buf = buf;
+               sr->len = *len;
+map_ubuf:
+               ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+                                 &kmsg->msg.msg_iter);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       return 0;
+}
+
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -1044,17 +1125,10 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 
 retry_multishot:
        if (io_do_buffer_select(req)) {
-               void __user *buf;
-
-               buf = io_buffer_select(req, &len, issue_flags);
-               if (!buf)
-                       return -ENOBUFS;
-               sr->buf = buf;
-               sr->len = len;
-               ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
-                                 &kmsg->msg.msg_iter);
+               ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
                if (unlikely(ret))
                        goto out_free;
+               sr->buf = NULL;
        }
 
        kmsg->msg.msg_inq = -1;
