@@ -1025,7 +1025,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
 static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
 
-static bool io_cqring_fill_event(struct io_kiocb *req, long res, unsigned cflags);
+static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+				 long res, unsigned int cflags);
 static void io_put_req(struct io_kiocb *req);
 static void io_put_req_deferred(struct io_kiocb *req, int nr);
 static void io_dismantle_req(struct io_kiocb *req);
@@ -1266,7 +1267,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_cqring_fill_event(req, status, 0);
+		io_cqring_fill_event(req->ctx, req->user_data, status, 0);
 		io_put_req_deferred(req, 1);
 	}
 }
@@ -1500,10 +1501,9 @@ static inline void req_ref_get(struct io_kiocb *req)
 	atomic_inc(&req->refs);
 }
 
-static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
-				     unsigned int cflags)
+static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
+				     long res, unsigned int cflags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	struct io_overflow_cqe *ocqe;
 
 	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
@@ -1521,20 +1521,19 @@ static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
 		set_bit(0, &ctx->cq_check_overflow);
 		ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
 	}
-	ocqe->cqe.user_data = req->user_data;
+	ocqe->cqe.user_data = user_data;
 	ocqe->cqe.res = res;
 	ocqe->cqe.flags = cflags;
 	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
 	return true;
 }
 
-static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
-					  unsigned int cflags)
+static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+					  long res, unsigned int cflags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	struct io_uring_cqe *cqe;
 
-	trace_io_uring_complete(ctx, req->user_data, res, cflags);
+	trace_io_uring_complete(ctx, user_data, res, cflags);
 
 	/*
 	 * If we can't get a cq entry, userspace overflowed the
@@ -1543,19 +1542,19 @@ static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
 	 */
 	cqe = io_get_cqring(ctx);
 	if (likely(cqe)) {
-		WRITE_ONCE(cqe->user_data, req->user_data);
+		WRITE_ONCE(cqe->user_data, user_data);
 		WRITE_ONCE(cqe->res, res);
 		WRITE_ONCE(cqe->flags, cflags);
 		return true;
 	}
-	return io_cqring_event_overflow(req, res, cflags);
+	return io_cqring_event_overflow(ctx, user_data, res, cflags);
 }
 
 /* not as hot to bloat with inlining */
-static noinline bool io_cqring_fill_event(struct io_kiocb *req, long res,
-					  unsigned int cflags)
+static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+					  long res, unsigned int cflags)
 {
-	return __io_cqring_fill_event(req, res, cflags);
+	return __io_cqring_fill_event(ctx, user_data, res, cflags);
 }
 
 static void io_req_complete_post(struct io_kiocb *req, long res,
@@ -1565,7 +1564,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 	unsigned long flags;
 
 	spin_lock_irqsave(&ctx->completion_lock, flags);
-	__io_cqring_fill_event(req, res, cflags);
+	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
@@ -1776,7 +1775,8 @@ static bool io_kill_linked_timeout(struct io_kiocb *req)
 		io_remove_next_linked(req);
 		link->timeout.head = NULL;
 		if (hrtimer_try_to_cancel(&io->timer) != -1) {
-			io_cqring_fill_event(link, -ECANCELED, 0);
+			io_cqring_fill_event(link->ctx, link->user_data,
+					     -ECANCELED, 0);
 			io_put_req_deferred(link, 1);
 			return true;
 		}
@@ -1795,7 +1795,7 @@ static void io_fail_links(struct io_kiocb *req)
 		link->link = NULL;
 
 		trace_io_uring_fail_link(req, link);
-		io_cqring_fill_event(link, -ECANCELED, 0);
+		io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
 		io_put_req_deferred(link, 2);
 		link = nxt;
 	}
@@ -2116,7 +2116,8 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
 	spin_lock_irq(&ctx->completion_lock);
 	for (i = 0; i < nr; i++) {
 		req = cs->reqs[i];
-		__io_cqring_fill_event(req, req->result, req->compl.cflags);
+		__io_cqring_fill_event(ctx, req->user_data, req->result,
+					req->compl.cflags);
 	}
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
@@ -2256,7 +2257,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		if (req->flags & REQ_F_BUFFER_SELECTED)
 			cflags = io_put_rw_kbuf(req);
 
-		__io_cqring_fill_event(req, req->result, cflags);
+		__io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
 		(*nr_events)++;
 
 		if (req_ref_put_and_test(req))
@@ -4875,7 +4876,7 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
 	}
 	if (req->poll.events & EPOLLONESHOT)
 		flags = 0;
-	if (!io_cqring_fill_event(req, error, flags)) {
+	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
 		io_poll_remove_waitqs(req);
 		req->poll.done = true;
 		flags = 0;
@@ -5203,7 +5204,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 
 	do_complete = io_poll_remove_waitqs(req);
 	if (do_complete) {
-		io_cqring_fill_event(req, -ECANCELED, 0);
+		io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
 		io_commit_cqring(req->ctx);
 		req_set_fail_links(req);
 		io_put_req_deferred(req, 1);
@@ -5455,7 +5456,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	atomic_set(&req->ctx->cq_timeouts,
 		atomic_read(&req->ctx->cq_timeouts) + 1);
 
-	io_cqring_fill_event(req, -ETIME, 0);
+	io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
@@ -5497,7 +5498,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 		return PTR_ERR(req);
 
 	req_set_fail_links(req);
-	io_cqring_fill_event(req, -ECANCELED, 0);
+	io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
 	io_put_req_deferred(req, 1);
 	return 0;
 }
@@ -5570,7 +5571,7 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_timeout_update(ctx, tr->addr, &tr->ts,
 				io_translate_timeout_mode(tr->flags));
 
-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
 	io_cqring_ev_posted(ctx);
@@ -5722,7 +5723,7 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
 done:
 	if (!ret)
 		ret = success_ret;
-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	io_cqring_ev_posted(ctx);
@@ -5779,7 +5780,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 
 	spin_lock_irq(&ctx->completion_lock);
 done:
-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
 	io_commit_cqring(ctx);
 	spin_unlock_irq(&ctx->completion_lock);
 	io_cqring_ev_posted(ctx);
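
Taken together, these hunks change io_cqring_event_overflow(), __io_cqring_fill_event() and io_cqring_fill_event() to take the ring context plus a raw user_data value instead of a struct io_kiocb, and update every call site to pass req->ctx (or the local ctx) and req->user_data. Below is a minimal sketch of the calling pattern this enables; io_post_cqe_sketch() is a hypothetical helper, not part of the patch, and it simply mirrors the lock/fill/commit/wake sequence visible in the io_async_find_and_cancel() hunk above.

/*
 * Hypothetical illustration only (not in the patch): with the new
 * signature, a CQE can be filled from a context that holds no io_kiocb,
 * as long as the ring ctx and the original user_data are known.
 */
static void io_post_cqe_sketch(struct io_ring_ctx *ctx, u64 user_data, long res)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	io_cqring_fill_event(ctx, user_data, res, 0);	/* new 4-argument form */
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}

Decoupling CQE posting from struct io_kiocb appears to be the point of the refactor: a CQE only needs user_data, res and flags, so completions can in principle be posted for work that never had a request structure attached.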