@@ -508,9 +508,8 @@ struct io_async_msghdr {
 
 struct io_async_rw {
 	struct iovec			fast_iov[UIO_FASTIOV];
-	struct iovec			*iov;
-	ssize_t				nr_segs;
-	ssize_t				size;
+	const struct iovec		*free_iovec;
+	struct iov_iter			iter;
 	struct wait_page_queue		wpq;
 };
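Note: instead of saving the iovec pointer, segment count, and byte count as separate fields and rebuilding an iterator on retry, the async context now snapshots the whole struct iov_iter by value; free_iovec is non-NULL only when the request owns a heap-allocated iovec that must be kfree'd at cleanup. A minimal userspace sketch of the same save/clean pattern — iter_state, async_state, save_state, and clean_state are illustrative names, not kernel APIs:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Stand-in for the iterator state the kernel now saves in one piece. */
struct iter_state {
	const struct iovec *iov;	/* current segment */
	size_t nr_segs;			/* segments remaining */
	size_t count;			/* bytes remaining */
};

struct async_state {
	struct iovec fast_iov[8];	/* inline storage, like fast_iov[UIO_FASTIOV] */
	const struct iovec *free_iovec;	/* non-NULL => owned allocation to free */
	struct iter_state iter;		/* whole iterator snapshot, by value */
};

static void save_state(struct async_state *as, const struct iter_state *it,
		       const struct iovec *heap_iov)
{
	memcpy(&as->iter, it, sizeof(*it));	/* one copy captures position and remainder */
	as->free_iovec = heap_iov;		/* NULL when the inline array was used */
}

static void clean_state(struct async_state *as)
{
	free((void *)as->free_iovec);		/* free(NULL) is a harmless no-op */
	as->free_iovec = NULL;
}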
@@ -915,8 +914,8 @@ static void io_file_put_work(struct work_struct *work);
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 			       struct iovec **iovec, struct iov_iter *iter,
 			       bool needs_lock);
-static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
-			     struct iovec *iovec, struct iovec *fast_iov,
+static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
+			     const struct iovec *fast_iov,
 			     struct iov_iter *iter);
 
 static struct kmem_cache *req_cachep;
@@ -2299,7 +2298,7 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
 	ret = io_import_iovec(rw, req, &iovec, &iter, false);
 	if (ret < 0)
 		goto end_req;
-	ret = io_setup_async_rw(req, ret, iovec, inline_vecs, &iter);
+	ret = io_setup_async_rw(req, iovec, inline_vecs, &iter);
 	if (!ret)
 		return true;
 	kfree(iovec);
@@ -2820,6 +2819,13 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	ssize_t ret;
 	u8 opcode;
 
+	if (req->io) {
+		struct io_async_rw *iorw = &req->io->rw;
+
+		*iovec = NULL;
+		return iov_iter_count(&iorw->iter);
+	}
+
 	opcode = req->opcode;
 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
 		*iovec = NULL;
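Note: with the iterator persisted across attempts, io_import_iovec() can short-circuit before any opcode-specific parsing: if req->io already holds async state, the saved iterator is authoritative, so the function just reports the bytes still pending via iov_iter_count() and sets *iovec to NULL so the caller has nothing to free. The block it replaces (removed in the next hunk) sat after the fixed-buffer cases and had to rebuild the iterator from scratch with iov_iter_init().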
@@ -2845,14 +2851,6 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 		return ret < 0 ? ret : sqe_len;
 	}
 
-	if (req->io) {
-		struct io_async_rw *iorw = &req->io->rw;
-
-		iov_iter_init(iter, rw, iorw->iov, iorw->nr_segs, iorw->size);
-		*iovec = NULL;
-		return iorw->size;
-	}
-
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		ret = io_iov_buffer_select(req, *iovec, needs_lock);
 		if (!ret) {
@@ -2930,21 +2928,29 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
 	return ret;
 }
 
-static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
-			  struct iovec *iovec, struct iovec *fast_iov,
-			  struct iov_iter *iter)
+static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
+			  const struct iovec *fast_iov, struct iov_iter *iter)
 {
 	struct io_async_rw *rw = &req->io->rw;
 
-	rw->nr_segs = iter->nr_segs;
-	rw->size = io_size;
+	memcpy(&rw->iter, iter, sizeof(*iter));
+	rw->free_iovec = NULL;
+	/* can only be fixed buffers, no need to do anything */
+	if (iter->type == ITER_BVEC)
+		return;
 	if (!iovec) {
-		rw->iov = rw->fast_iov;
-		if (rw->iov != fast_iov)
-			memcpy(rw->iov, fast_iov,
+		unsigned iov_off = 0;
+
+		rw->iter.iov = rw->fast_iov;
+		if (iter->iov != fast_iov) {
+			iov_off = iter->iov - fast_iov;
+			rw->iter.iov += iov_off;
+		}
+		if (rw->fast_iov != fast_iov)
+			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
 			       sizeof(struct iovec) * iter->nr_segs);
 	} else {
-		rw->iov = iovec;
+		rw->free_iovec = iovec;
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
 }
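Note: the subtle part of the new io_req_map_rw() is that the iterator may already have advanced past whole segments of the caller's on-stack fast_iov, so iter->iov no longer points at fast_iov[0]. The hunk preserves that offset (iov_off) when redirecting the saved iterator at the request-owned copy, and copies only the still-pending tail. A userspace sketch of the same offset-preserving copy — saved_rw and save_pending are hypothetical names:

#include <assert.h>
#include <string.h>
#include <sys/uio.h>

#define FAST_SEGS 8

struct saved_rw {
	struct iovec fast_iov[FAST_SEGS];
	const struct iovec *iov;	/* where the saved iterator resumes */
};

/*
 * src points at the caller's inline array; pos is how many whole segments
 * the iterator has already consumed; nr_segs is what remains. Mirrors the
 * iov_off logic: keep the same offset in our own copy so the saved
 * position still indexes correctly.
 */
static void save_pending(struct saved_rw *rw, const struct iovec *src,
			 size_t pos, size_t nr_segs)
{
	rw->iov = rw->fast_iov + pos;
	memcpy(rw->fast_iov + pos, src + pos, sizeof(struct iovec) * nr_segs);
}

int main(void)
{
	char a[16], b[16], c[16];
	struct iovec stack_iov[FAST_SEGS] = {
		{ a, 16 }, { b, 16 }, { c, 16 },
	};
	struct saved_rw rw;

	/* Iterator consumed 2 of 3 segments: save only the tail. */
	save_pending(&rw, stack_iov, 2, 1);
	assert(rw.iov == rw.fast_iov + 2);
	assert(rw.iov[0].iov_base == (void *)c);
	return 0;
}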
@@ -2963,8 +2969,8 @@ static int io_alloc_async_ctx(struct io_kiocb *req)
 	return __io_alloc_async_ctx(req);
 }
 
-static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
-			     struct iovec *iovec, struct iovec *fast_iov,
+static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
+			     const struct iovec *fast_iov,
 			     struct iov_iter *iter)
 {
 	if (!io_op_defs[req->opcode].async_ctx)
@@ -2973,26 +2979,27 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
 		if (__io_alloc_async_ctx(req))
 			return -ENOMEM;
 
-		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+		io_req_map_rw(req, iovec, fast_iov, iter);
 	}
 	return 0;
 }
 
 static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
 				   bool force_nonblock)
 {
-	struct io_async_ctx *io = req->io;
-	struct iov_iter iter;
+	struct io_async_rw *iorw = &req->io->rw;
 	ssize_t ret;
 
-	io->rw.iov = io->rw.fast_iov;
+	iorw->iter.iov = iorw->fast_iov;
+	/* reset ->io around the iovec import, we don't want to use it */
 	req->io = NULL;
-	ret = io_import_iovec(rw, req, &io->rw.iov, &iter, !force_nonblock);
-	req->io = io;
+	ret = io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
+				&iorw->iter, !force_nonblock);
+	req->io = container_of(iorw, struct io_async_ctx, rw);
 	if (unlikely(ret < 0))
 		return ret;
 
-	io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+	io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
 	return 0;
 }
 
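Note: io_rw_prep_async() now imports the SQE's iovec straight into the persistent iterator. Because io_import_iovec() above short-circuits whenever req->io is non-NULL, the field has to be cleared around the import so the real parsing path runs, then restored with container_of(). A toy userspace sketch of that reset-around-the-call dance — do_import, prep, and the struct names are made up for illustration:

#include <stddef.h>

struct ctx { int cached; };
struct request { struct ctx *io; };

/* Pretend importer: short-circuits when saved state exists. */
static int do_import(struct request *req)
{
	if (req->io)
		return req->io->cached;	/* would skip the real parsing */
	return 42;			/* "real" import result */
}

static int prep(struct request *req)
{
	struct ctx *io = req->io;
	int ret;

	req->io = NULL;		/* force do_import() down the parse path */
	ret = do_import(req);
	req->io = io;		/* restore; the kernel uses container_of() here */
	return ret;
}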
@@ -3090,7 +3097,8 @@ static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
  * succeed, or in rare cases where it fails, we then fall back to using the
  * async worker threads for a blocking retry.
  */
-static bool io_rw_should_retry(struct io_kiocb *req)
+static bool io_rw_should_retry(struct io_kiocb *req, struct iovec *iovec,
+			       struct iovec *fast_iov, struct iov_iter *iter)
 {
 	struct kiocb *kiocb = &req->rw.kiocb;
 	int ret;
@@ -3113,8 +3121,11 @@ static bool io_rw_should_retry(struct io_kiocb *req)
 	 * If request type doesn't require req->io to defer in general,
 	 * we need to allocate it here
 	 */
-	if (!req->io && __io_alloc_async_ctx(req))
-		return false;
+	if (!req->io) {
+		if (__io_alloc_async_ctx(req))
+			return false;
+		io_req_map_rw(req, iovec, fast_iov, iter);
+	}
 
 	ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq,
 						io_async_buf_func, req);
@@ -3141,12 +3152,14 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw.kiocb;
-	struct iov_iter iter;
+	struct iov_iter __iter, *iter = &__iter;
 	size_t iov_count;
-	ssize_t io_size, ret, ret2;
-	unsigned long nr_segs;
+	ssize_t io_size, ret, ret2 = 0;
+
+	if (req->io)
+		iter = &req->io->rw.iter;
 
-	ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
+	ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
 	io_size = ret;
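Note: io_read() (and io_write() below) now pick their iterator the same way: a fresh on-stack __iter for the first attempt, or the persisted req->io->rw.iter when the request is being retried, so a resubmission resumes from the saved position instead of manually restoring count and nr_segs. A userspace sketch of the selection, with hypothetical types:

#include <stdio.h>

struct iter_state { size_t count; };
struct async_ctx { struct iter_state iter; };

static size_t do_io(struct async_ctx *saved)
{
	struct iter_state __iter = { .count = 4096 }, *iter = &__iter;

	if (saved)
		iter = &saved->iter;	/* resume saved progress */
	return iter->count;		/* bytes this attempt will try */
}

int main(void)
{
	struct async_ctx resume = { .iter = { .count = 1024 } };

	printf("first: %zu, retry: %zu\n", do_io(NULL), do_io(&resume));
	return 0;
}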
@@ -3160,30 +3173,26 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;
 
-	iov_count = iov_iter_count(&iter);
-	nr_segs = iter.nr_segs;
+	iov_count = iov_iter_count(iter);
 	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
 	if (unlikely(ret))
 		goto out_free;
 
-	ret2 = io_iter_do_read(req, &iter);
+	ret2 = io_iter_do_read(req, iter);
 
 	/* Catch -EAGAIN return for forced non-blocking submission */
 	if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
 		kiocb_done(kiocb, ret2, cs);
 	} else {
-		iter.count = iov_count;
-		iter.nr_segs = nr_segs;
 copy_iov:
-		ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
-					&iter);
+		ret = io_setup_async_rw(req, iovec, inline_vecs, iter);
 		if (ret)
 			goto out_free;
 		/* it's copied and will be cleaned with ->io */
 		iovec = NULL;
 		/* if we can retry, do so with the callbacks armed */
-		if (io_rw_should_retry(req)) {
-			ret2 = io_iter_do_read(req, &iter);
+		if (io_rw_should_retry(req, iovec, inline_vecs, iter)) {
+			ret2 = io_iter_do_read(req, iter);
 			if (ret2 == -EIOCBQUEUED) {
 				goto out_free;
 			} else if (ret2 != -EAGAIN) {
@@ -3223,12 +3232,14 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw.kiocb;
-	struct iov_iter iter;
+	struct iov_iter __iter, *iter = &__iter;
 	size_t iov_count;
 	ssize_t ret, ret2, io_size;
-	unsigned long nr_segs;
 
-	ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
+	if (req->io)
+		iter = &req->io->rw.iter;
+
+	ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
 	io_size = ret;
@@ -3247,8 +3258,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	    (req->flags & REQ_F_ISREG))
 		goto copy_iov;
 
-	iov_count = iov_iter_count(&iter);
-	nr_segs = iter.nr_segs;
+	iov_count = iov_iter_count(iter);
 	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
 	if (unlikely(ret))
 		goto out_free;
@@ -3269,9 +3279,9 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	kiocb->ki_flags |= IOCB_WRITE;
 
 	if (req->file->f_op->write_iter)
-		ret2 = call_write_iter(req->file, kiocb, &iter);
+		ret2 = call_write_iter(req->file, kiocb, iter);
 	else if (req->file->f_op->write)
-		ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+		ret2 = loop_rw_iter(WRITE, req->file, kiocb, iter);
 	else
 		ret2 = -EINVAL;
 
@@ -3284,16 +3294,10 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	if (!force_nonblock || ret2 != -EAGAIN) {
 		kiocb_done(kiocb, ret2, cs);
 	} else {
-		iter.count = iov_count;
-		iter.nr_segs = nr_segs;
 copy_iov:
-		ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
-					&iter);
-		if (ret)
-			goto out_free;
-		/* it's copied and will be cleaned with ->io */
-		iovec = NULL;
-		return -EAGAIN;
+		ret = io_setup_async_rw(req, iovec, inline_vecs, iter);
+		if (!ret)
+			return -EAGAIN;
 	}
 out_free:
 	if (iovec)
@@ -5583,8 +5587,8 @@ static void __io_clean_op(struct io_kiocb *req)
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
 	case IORING_OP_WRITE:
-		if (io->rw.iov != io->rw.fast_iov)
-			kfree(io->rw.iov);
+		if (io->rw.free_iovec)
+			kfree(io->rw.free_iovec);
 		break;
 	case IORING_OP_RECVMSG:
 	case IORING_OP_SENDMSG:
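Note: cleanup now keys off free_iovec instead of comparing pointers. That is arguably more than cosmetic: with the offset handling in io_req_map_rw(), the saved iter.iov can legitimately point into the middle of fast_iov, where an `iov != fast_iov` test would no longer identify inline storage and could kfree() memory that was never allocated. A dedicated ownership field sidesteps that class of bug, and since kfree(NULL) is a no-op in the kernel, the guard here is merely belt-and-braces.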