@@ -747,7 +747,8 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
 	return ret;
 }
 
-#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
+#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
+			IORING_RECVSEND_BUNDLE)
 
 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
@@ -761,21 +762,14 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	sr->len = READ_ONCE(sqe->len);
 	sr->flags = READ_ONCE(sqe->ioprio);
-	if (sr->flags & ~(RECVMSG_FLAGS))
+	if (sr->flags & ~RECVMSG_FLAGS)
 		return -EINVAL;
 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
 	if (sr->msg_flags & MSG_DONTWAIT)
 		req->flags |= REQ_F_NOWAIT;
 	if (sr->msg_flags & MSG_ERRQUEUE)
 		req->flags |= REQ_F_CLEAR_POLLIN;
-	if (sr->flags & IORING_RECV_MULTISHOT) {
-		if (!(req->flags & REQ_F_BUFFER_SELECT))
-			return -EINVAL;
-		if (sr->msg_flags & MSG_WAITALL)
-			return -EINVAL;
-		if (req->opcode == IORING_OP_RECV && sr->len)
-			return -EINVAL;
-		req->flags |= REQ_F_APOLL_MULTISHOT;
+	if (req->flags & REQ_F_BUFFER_SELECT) {
 		/*
 		 * Store the buffer group for this multishot receive separately,
 		 * as if we end up doing an io-wq based issue that selects a
@@ -785,6 +779,20 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		 * restore it.
 		 */
 		sr->buf_group = req->buf_index;
+		req->buf_list = NULL;
+	}
+	if (sr->flags & IORING_RECV_MULTISHOT) {
+		if (!(req->flags & REQ_F_BUFFER_SELECT))
+			return -EINVAL;
+		if (sr->msg_flags & MSG_WAITALL)
+			return -EINVAL;
+		if (req->opcode == IORING_OP_RECV && sr->len)
+			return -EINVAL;
+		req->flags |= REQ_F_APOLL_MULTISHOT;
+	}
+	if (sr->flags & IORING_RECVSEND_BUNDLE) {
+		if (req->opcode == IORING_OP_RECVMSG)
+			return -EINVAL;
 	}
 
 #ifdef CONFIG_COMPAT
@@ -805,19 +813,28 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 				  struct io_async_msghdr *kmsg,
 				  bool mshot_finished, unsigned issue_flags)
 {
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	unsigned int cflags;
 
-	cflags = io_put_kbuf(req, issue_flags);
+	if (sr->flags & IORING_RECVSEND_BUNDLE)
+		cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+				      issue_flags);
+	else
+		cflags = io_put_kbuf(req, issue_flags);
+
 	if (kmsg->msg.msg_inq > 0)
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
+	/* bundle with no more immediate buffers, we're done */
+	if (sr->flags & IORING_RECVSEND_BUNDLE && req->flags & REQ_F_BL_EMPTY)
+		goto finish;
+
 	/*
 	 * Fill CQE for this receive and see if we should keep trying to
 	 * receive from this socket.
 	 */
 	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
 	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
-		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
 
 		io_mshot_prep_retry(req, kmsg);
@@ -837,6 +854,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	}
 
 	/* Finish the request / stop multishot. */
+finish:
 	io_req_set_res(req, *ret, cflags);
 
 	if (issue_flags & IO_URING_F_MULTISHOT)
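
Note on the io_put_kbufs() call in the hunk above: the bundle completion path has to know how many provided buffers the transfer actually consumed. The io_bundle_nbufs() helper that computes this is not part of these hunks, so the sketch below is only an illustration of the accounting it has to do (walk the selected iovec array until the transferred byte count is covered), not necessarily the exact in-tree implementation.

/*
 * Illustrative sketch only: the real io_bundle_nbufs() lives elsewhere
 * in net.c and is not shown in this diff. Assumes the iovec array the
 * receive was mapped over is still reachable via kmsg.
 */
static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
{
	struct iovec *iov;
	int nbufs;

	/* no bytes transferred means no buffers were consumed */
	if (ret <= 0)
		return 0;
	/* a single-buffer (ubuf) iterator maps to exactly one buffer */
	if (iter_is_ubuf(&kmsg->msg.msg_iter))
		return 1;

	iov = kmsg->free_iov;
	if (!iov)
		iov = &kmsg->fast_iov;

	/* count segments until 'ret' bytes are accounted for */
	nbufs = 0;
	do {
		int this_len = min_t(int, iov[nbufs].iov_len, ret);

		nbufs++;
		ret -= this_len;
	} while (ret);

	return nbufs;
}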
@@ -1020,6 +1038,69 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	return ret;
 }
 
+static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
+			      size_t *len, unsigned int issue_flags)
+{
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+	int ret;
+
+	/*
+	 * If the ring isn't locked, then don't use the peek interface
+	 * to grab multiple buffers as we will lock/unlock between
+	 * this selection and posting the buffers.
+	 */
+	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+	    sr->flags & IORING_RECVSEND_BUNDLE) {
+		struct buf_sel_arg arg = {
+			.iovs = &kmsg->fast_iov,
+			.nr_iovs = 1,
+			.mode = KBUF_MODE_EXPAND,
+		};
+
+		if (kmsg->free_iov) {
+			arg.nr_iovs = kmsg->free_iov_nr;
+			arg.iovs = kmsg->free_iov;
+			arg.mode |= KBUF_MODE_FREE;
+		}
+
+		if (kmsg->msg.msg_inq > 0)
+			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
+
+		ret = io_buffers_peek(req, &arg);
+		if (unlikely(ret < 0))
+			return ret;
+
+		/* special case 1 vec, can be a fast path */
+		if (ret == 1) {
+			sr->buf = arg.iovs[0].iov_base;
+			sr->len = arg.iovs[0].iov_len;
+			goto map_ubuf;
+		}
+		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
+				arg.out_len);
+		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
+			kmsg->free_iov_nr = ret;
+			kmsg->free_iov = arg.iovs;
+		}
+	} else {
+		void __user *buf;
+
+		*len = sr->len;
+		buf = io_buffer_select(req, len, issue_flags);
+		if (!buf)
+			return -ENOBUFS;
+		sr->buf = buf;
+		sr->len = *len;
+map_ubuf:
+		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+				  &kmsg->msg.msg_iter);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	return 0;
+}
+
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -1044,17 +1125,10 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 
 retry_multishot:
 	if (io_do_buffer_select(req)) {
-		void __user *buf;
-
-		buf = io_buffer_select(req, &len, issue_flags);
-		if (!buf)
-			return -ENOBUFS;
-		sr->buf = buf;
-		sr->len = len;
-		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
-				  &kmsg->msg.msg_iter);
+		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
 		if (unlikely(ret))
 			goto out_free;
+		sr->buf = NULL;
 	}
 
 	kmsg->msg.msg_inq = -1;
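
For context, a minimal userspace sketch of what this patch enables, assuming a liburing build that exposes IORING_RECVSEND_BUNDLE and the buffer-ring helpers (the group id, buffer count, and sizes below are arbitrary, and error handling is trimmed):

#include <liburing.h>
#include <stdlib.h>

#define BGID		0	/* provided-buffer group id (arbitrary) */
#define NR_BUFS		8	/* must be a power of 2 for buf rings */
#define BUF_SIZE	4096

static int bundled_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_buf_ring *br;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i, ret;

	/* register a ring of provided buffers under group BGID */
	br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &ret);
	if (!br)
		return ret;
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, malloc(BUF_SIZE), BUF_SIZE, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);

	/* addr/len are NULL/0: the buffers come from the group instead */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	sqe->ioprio |= IORING_RECVSEND_BUNDLE;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;

	/*
	 * cqe->res is the total byte count across however many buffers
	 * the bundle consumed; the first buffer id is reported in
	 * cqe->flags >> IORING_CQE_BUFFER_SHIFT.
	 */
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}

The point of the bundle flag is visible in the completion: one CQE covers multiple provided buffers, consumed in ring order starting from the reported buffer id, which is exactly the accounting the io_put_kbufs()/io_bundle_nbufs() path in io_recv_finish() above performs.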