@@ -900,6 +900,13 @@ static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
 static void __io_queue_sqe(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe);
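+/* forward declarations for the rw helpers used by the re-issue path below */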
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+			       struct iovec **iovec, struct iov_iter *iter,
+			       bool needs_lock);
+static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
+			     struct iovec *iovec, struct iovec *fast_iov,
+			     struct iov_iter *iter);
+
 static struct kmem_cache *req_cachep;
 
 static const struct file_operations io_uring_fops;
@@ -1978,12 +1985,115 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
 	__io_cqring_add_event(req, res, cflags);
 }
 
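+/*
+ * Attach/detach the ring's mm to the current kernel thread, so that
+ * user buffers can be reached from SQPOLL and task_work context.
+ */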
+static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
+{
+	struct mm_struct *mm = current->mm;
+
+	if (mm) {
+		kthread_unuse_mm(mm);
+		mmput(mm);
+	}
+}
+
+static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
+				   struct io_kiocb *req)
+{
+	if (io_op_defs[req->opcode].needs_mm && !current->mm) {
+		if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
+			return -EFAULT;
+		kthread_use_mm(ctx->sqo_mm);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_BLOCK
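+/*
+ * Re-import the iovec and set up the async context so that a read or
+ * write that failed with -EAGAIN can be re-issued from io-wq.
+ */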
+static bool io_resubmit_prep(struct io_kiocb *req, int error)
+{
+	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+	ssize_t ret = -ECANCELED;
+	struct iov_iter iter;
+	int rw;
+
+	if (error) {
+		ret = error;
+		goto end_req;
+	}
+
+	switch (req->opcode) {
+	case IORING_OP_READV:
+	case IORING_OP_READ_FIXED:
+	case IORING_OP_READ:
+		rw = READ;
+		break;
+	case IORING_OP_WRITEV:
+	case IORING_OP_WRITE_FIXED:
+	case IORING_OP_WRITE:
+		rw = WRITE;
+		break;
+	default:
+		printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
+				req->opcode);
+		goto end_req;
+	}
+
+	ret = io_import_iovec(rw, req, &iovec, &iter, false);
+	if (ret < 0)
+		goto end_req;
+	ret = io_setup_async_rw(req, ret, iovec, inline_vecs, &iter);
+	if (!ret)
+		return true;
+	kfree(iovec);
+end_req:
+	io_cqring_add_event(req, ret);
+	req_set_fail_links(req);
+	io_put_req(req);
+	return false;
+}
+
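+/*
+ * task_work callback: runs in the context of the submitting task,
+ * re-attaches the mm if needed and punts the request to io-wq.
+ */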
+static void io_rw_resubmit(struct callback_head *cb)
+{
+	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
+	int err;
+
+	__set_current_state(TASK_RUNNING);
+
+	err = io_sq_thread_acquire_mm(ctx, req);
+
+	if (io_resubmit_prep(req, err)) {
+		refcount_inc(&req->refs);
+		io_queue_async_work(req);
+	}
+}
+#endif
+
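+/*
+ * Returns true if the -EAGAIN/-EOPNOTSUPP failure was queued for
+ * re-issue via task_work, false if the request should just fail.
+ */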
+static bool io_rw_reissue(struct io_kiocb *req, long res)
+{
+#ifdef CONFIG_BLOCK
+	struct task_struct *tsk;
+	int ret;
+
+	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
+		return false;
+
+	tsk = req->task;
+	init_task_work(&req->task_work, io_rw_resubmit);
+	ret = task_work_add(tsk, &req->task_work, true);
+	if (!ret)
+		return true;
+#endif
+	return false;
+}
+
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
-	io_complete_rw_common(kiocb, res);
-	io_put_req(req);
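+	/* try to re-issue a block-layer -EAGAIN before completing with an error */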
+	if (!io_rw_reissue(req, res)) {
+		io_complete_rw_common(kiocb, res);
+		io_put_req(req);
+	}
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -2169,6 +2279,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (kiocb->ki_flags & IOCB_NOWAIT)
 		req->flags |= REQ_F_NOWAIT;
 
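+	/* O_DIRECT can hit the re-issue path, which needs a task reference */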
+	if (kiocb->ki_flags & IOCB_DIRECT)
+		io_get_req_task(req);
+
 	if (force_nonblock)
 		kiocb->ki_flags |= IOCB_NOWAIT;
 
@@ -2668,6 +2781,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 	iov_count = iov_iter_count(&iter);
 	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
 	if (!ret) {
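+		/* stash the iter state so it can be restored if we need to retry */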
+		unsigned long nr_segs = iter.nr_segs;
 		ssize_t ret2 = 0;
 
 		if (req->file->f_op->read_iter)
@@ -2679,6 +2793,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
 		if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
 			kiocb_done(kiocb, ret2);
 		} else {
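+			/* rewind the iter: the failed nowait attempt may have advanced it */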
+			iter.count = iov_count;
+			iter.nr_segs = nr_segs;
 copy_iov:
 			ret = io_setup_async_rw(req, io_size, iovec,
 						inline_vecs, &iter);
@@ -2765,6 +2881,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 	iov_count = iov_iter_count(&iter);
 	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
 	if (!ret) {
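+		/* as in io_read(): remember the iter state for a possible retry */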
+		unsigned long nr_segs = iter.nr_segs;
 		ssize_t ret2;
 
 		/*
@@ -2802,6 +2919,8 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
 		if (!force_nonblock || ret2 != -EAGAIN) {
 			kiocb_done(kiocb, ret2);
 		} else {
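+			/* rewind the iter before setting up the async retry */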
+			iter.count = iov_count;
+			iter.nr_segs = nr_segs;
 copy_iov:
 			ret = io_setup_async_rw(req, io_size, iovec,
 						inline_vecs, &iter);
@@ -4282,28 +4401,6 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
 	__io_queue_proc(&pt->req->apoll->poll, pt, head);
 }
 
-static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
-{
-	struct mm_struct *mm = current->mm;
-
-	if (mm) {
-		kthread_unuse_mm(mm);
-		mmput(mm);
-	}
-}
-
-static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
-				   struct io_kiocb *req)
-{
-	if (io_op_defs[req->opcode].needs_mm && !current->mm) {
-		if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
-			return -EFAULT;
-		kthread_use_mm(ctx->sqo_mm);
-	}
-
-	return 0;
-}
-
 static void io_async_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -5814,6 +5911,9 @@ static void io_submit_state_start(struct io_submit_state *state,
 				  unsigned int max_ios)
 {
 	blk_start_plug(&state->plug);
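+	/* tell the block layer to return -EAGAIN instead of blocking on resources */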
+#ifdef CONFIG_BLOCK
+	state->plug.nowait = true;
+#endif
 	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;