@@ -839,6 +839,8 @@ struct io_kiocb {
 	struct hlist_node		hash_node;
 	struct async_poll		*apoll;
 	struct io_wq_work		work;
+	/* store used ubuf, so we can prevent reloading */
+	struct io_mapped_ubuf		*imu;
 };
 
 struct io_tctx_node {
@@ -2683,6 +2685,12 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		kiocb->ki_complete = io_complete_rw;
 	}
 
+	if (req->opcode == IORING_OP_READ_FIXED ||
+	    req->opcode == IORING_OP_WRITE_FIXED) {
+		req->imu = NULL;
+		io_req_set_rsrc_node(req);
+	}
+
 	req->rw.addr = READ_ONCE(sqe->addr);
 	req->rw.len = READ_ONCE(sqe->len);
 	req->buf_index = READ_ONCE(sqe->buf_index);
@@ -2748,21 +2756,13 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 	}
 }
 
-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
+			     struct io_mapped_ubuf *imu)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	size_t len = req->rw.len;
-	struct io_mapped_ubuf *imu;
-	u16 index, buf_index = req->buf_index;
 	u64 buf_end, buf_addr = req->rw.addr;
 	size_t offset;
 
-	if (unlikely(buf_index >= ctx->nr_user_bufs))
-		return -EFAULT;
-	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
-	imu = ctx->user_bufs[index];
-	buf_addr = req->rw.addr;
-
 	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
 		return -EFAULT;
 	/* not inside the mapped region */
@@ -2814,6 +2814,22 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
 	return 0;
 }
 
+static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_mapped_ubuf *imu = req->imu;
+	u16 index, buf_index = req->buf_index;
+
+	if (likely(!imu)) {
+		if (unlikely(buf_index >= ctx->nr_user_bufs))
+			return -EFAULT;
+		index = array_index_nospec(buf_index, ctx->nr_user_bufs);
+		imu = READ_ONCE(ctx->user_bufs[index]);
+		req->imu = imu;
+	}
+	return __io_import_fixed(req, rw, iter, imu);
+}
+
 static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
 {
 	if (needs_lock)
@@ -9506,6 +9522,9 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	ret = io_sq_offload_create(ctx, p);
 	if (ret)
 		goto err;
+	/* always set a rsrc node */
+	io_rsrc_node_switch_start(ctx);
+	io_rsrc_node_switch(ctx, NULL);
 
 	memset(&p->sq_off, 0, sizeof(p->sq_off));
 	p->sq_off.head = offsetof(struct io_rings, sq.head);
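
For context, the lookup that io_import_fixed() now caches in req->imu is driven from userspace by registered buffers and IORING_OP_READ_FIXED/WRITE_FIXED. Below is a minimal liburing sketch of that path; the file name "data.bin", the 4 KiB buffer, and the 8-entry ring are assumptions made for illustration, not anything taken from this commit.

/* hedged example: one registered buffer, one fixed read at buf_index 0 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd;

	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* pins the buffer; the kernel side ends up in ctx->user_bufs[] */
	if (io_uring_register_buffers(&ring, &iov, 1) < 0)
		return 1;

	fd = open("data.bin", O_RDONLY);
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* buf_index 0 is what io_import_fixed() resolves (and now caches) */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		printf("read %d bytes\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}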