2 files changed: +22 −21

This change moves struct io_wait_queue and the io_should_wake() helper out of the io_uring core file and into the shared io_uring header, so that other io_uring source files can use the inline wake-up check.

(first file: the io_uring core)

@@ -2477,33 +2477,12 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	return ret;
 }
 
-struct io_wait_queue {
-	struct wait_queue_entry wq;
-	struct io_ring_ctx *ctx;
-	unsigned cq_tail;
-	unsigned nr_timeouts;
-	ktime_t timeout;
-};
-
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
 	       !llist_empty(&ctx->work_llist);
 }
 
-static inline bool io_should_wake(struct io_wait_queue *iowq)
-{
-	struct io_ring_ctx *ctx = iowq->ctx;
-	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
-
-	/*
-	 * Wake up if we have enough events, or if a timeout occurred since we
-	 * started waiting. For timeouts, we always want to return to userspace,
-	 * regardless of event count.
-	 */
-	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
-}
-
 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 			    int wake_flags, void *key)
 {
(second file: the io_uring header)

@@ -35,6 +35,28 @@ enum {
 	IOU_STOP_MULTISHOT = -ECANCELED,
 };
 
+struct io_wait_queue {
+	struct wait_queue_entry wq;
+	struct io_ring_ctx *ctx;
+	unsigned cq_tail;
+	unsigned nr_timeouts;
+	ktime_t timeout;
+
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+	struct io_ring_ctx *ctx = iowq->ctx;
+	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+
+	/*
+	 * Wake up if we have enough events, or if a timeout occurred since we
+	 * started waiting. For timeouts, we always want to return to userspace,
+	 * regardless of event count.
+	 */
+	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
+
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
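
The interesting detail in io_should_wake() is the wrap-safe tail comparison: the CQ tail is an unsigned counter that is allowed to overflow, so the code subtracts the target tail and tests the sign of the signed result instead of comparing the raw values. Below is a minimal standalone userspace sketch of that idiom (the should_wake()/target_tail names are illustrative assumptions, not kernel API), showing why it keeps working across the 32-bit wraparound where a plain >= comparison would not:

/*
 * Standalone userspace sketch (not kernel code) of the wrap-safe check in
 * io_should_wake() above. Names here are illustrative assumptions.
 */
#include <stdio.h>
#include <stdbool.h>

/*
 * True once the ring tail has reached (or passed) the tail we wait for,
 * even if the unsigned counter wrapped in between.
 */
static bool should_wake(unsigned int current_tail, unsigned int target_tail)
{
	int dist = (int)(current_tail - target_tail);

	return dist >= 0;
}

int main(void)
{
	/* Ordinary case: waiting until tail 100 has been posted. */
	printf("%d\n", should_wake(99u, 100u));		/* 0: keep sleeping */
	printf("%d\n", should_wake(100u, 100u));	/* 1: wake up */

	/*
	 * Wraparound case: the target tail sits just past UINT_MAX, so the
	 * unsigned arithmetic wraps to a small value (0xffffffff + 5 wraps
	 * to 4). A naive current_tail >= target_tail test would wrongly wake
	 * the first waiter here; the signed distance stays correct.
	 */
	unsigned int target = 0xffffffffu + 5u;	/* wraps to 4 */
	printf("%d\n", should_wake(0xfffffffeu, target));	/* 0: not yet */
	printf("%d\n", should_wake(4u, target));	/* 1: target reached */
	return 0;
}

The second half of the kernel check, atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts, follows the same counter-snapshot pattern: the waiter records the timeout count when it starts sleeping, and any later mismatch means a timeout fired, so the task returns to userspace regardless of how many completions arrived, as the comment in the diff notes.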