@@ -676,7 +676,6 @@ struct io_kiocb {
 	};
 };
 
-#define IO_PLUG_THRESHOLD	2
 #define IO_IOPOLL_BATCH		8
 
 struct io_submit_state {
@@ -5914,7 +5913,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			  struct file *ring_file, int ring_fd)
 {
-	struct io_submit_state state, *statep = NULL;
+	struct io_submit_state state;
 	struct io_kiocb *link = NULL;
 	int i, submitted = 0;
 
@@ -5931,10 +5930,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
 
-	if (nr > IO_PLUG_THRESHOLD) {
-		io_submit_state_start(&state, nr);
-		statep = &state;
-	}
+	io_submit_state_start(&state, nr);
 
 	ctx->ring_fd = ring_fd;
 	ctx->ring_file = ring_file;
@@ -5949,14 +5945,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			io_consume_sqe(ctx);
 			break;
 		}
-		req = io_alloc_req(ctx, statep);
+		req = io_alloc_req(ctx, &state);
 		if (unlikely(!req)) {
 			if (!submitted)
 				submitted = -EAGAIN;
 			break;
 		}
 
-		err = io_init_req(ctx, req, sqe, statep);
+		err = io_init_req(ctx, req, sqe, &state);
 		io_consume_sqe(ctx);
 		/* will complete beyond this point, count as submitted */
 		submitted++;
@@ -5982,8 +5978,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	}
 	if (link)
 		io_queue_link_head(link);
-	if (statep)
-		io_submit_state_end(&state);
+	io_submit_state_end(&state);
 
 	/* Commit SQ ring head once we've consumed and submitted all SQEs */
 	io_commit_sqring(ctx);
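
Net effect, for quick reference: io_submit_sqes() now brackets the whole submission loop with io_submit_state_start()/io_submit_state_end() unconditionally, instead of only when more than IO_PLUG_THRESHOLD SQEs are being submitted, and passes &state straight into io_alloc_req() and io_init_req(). The condensed sketch below is assembled only from the hunks above; everything the diff does not show (SQE fetch, request queueing, link handling, the return path, and the exact declarations of sqe/req/err) is elided or assumed and marked as such in the comments.

/*
 * Condensed sketch of io_submit_sqes() after this change -- reconstructed
 * from the hunks above only, not the full function body.
 */
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
			  struct file *ring_file, int ring_fd)
{
	struct io_submit_state state;		/* always on the stack, no *statep gate */
	struct io_kiocb *link = NULL;
	int i, submitted = 0;

	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;

	io_submit_state_start(&state, nr);	/* unconditional, even for a single SQE */

	ctx->ring_fd = ring_fd;
	ctx->ring_file = ring_file;

	for (i = 0; i < nr; i++) {
		const struct io_uring_sqe *sqe;	/* declarations assumed; not part of the diff */
		struct io_kiocb *req;
		int err;

		/* ... fetch the next SQE into 'sqe' (not shown in this diff) ... */

		req = io_alloc_req(ctx, &state);	/* &state passed directly */
		if (unlikely(!req)) {
			if (!submitted)
				submitted = -EAGAIN;
			break;
		}

		err = io_init_req(ctx, req, sqe, &state);
		io_consume_sqe(ctx);
		/* will complete beyond this point, count as submitted */
		submitted++;

		/* ... submit 'req' and handle link chains (not shown in this diff) ... */
	}

	if (link)
		io_queue_link_head(link);
	io_submit_state_end(&state);		/* always paired with the _start() above */

	/* Commit SQ ring head once we've consumed and submitted all SQEs */
	io_commit_sqring(ctx);

	/* ... remainder of the function (return value etc.) not shown in the diff ... */
}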