
Commit 405b4dc

Stefan Roesch authored and axboe committed
io-uring: move io_wait_queue definition to header file
This moves the definition of the io_wait_queue structure to the header file so it can also be used from other files.

Signed-off-by: Stefan Roesch <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent: adaad27

File tree: 2 files changed, +22 −21 lines
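For context, a minimal sketch of what this move enables: with the struct and helper in io_uring/io_uring.h, any other source file under io_uring/ can reference them simply by including the header. The file and function below are hypothetical illustrations, not part of this commit:

/* Hypothetical io_uring/example.c -- illustration only, not in this commit. */
#include "io_uring.h"

/* struct io_wait_queue and io_should_wake() were private to io_uring.c
 * before this commit; they now resolve through the shared header. */
static bool example_wait_done(struct io_wait_queue *iowq)
{
	return io_should_wake(iowq);
}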

io_uring/io_uring.c

Lines changed: 0 additions & 21 deletions
@@ -2477,33 +2477,12 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	return ret;
 }
 
-struct io_wait_queue {
-	struct wait_queue_entry wq;
-	struct io_ring_ctx *ctx;
-	unsigned cq_tail;
-	unsigned nr_timeouts;
-	ktime_t timeout;
-};
-
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
 	       !llist_empty(&ctx->work_llist);
 }
 
-static inline bool io_should_wake(struct io_wait_queue *iowq)
-{
-	struct io_ring_ctx *ctx = iowq->ctx;
-	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
-
-	/*
-	 * Wake up if we have enough events, or if a timeout occurred since we
-	 * started waiting. For timeouts, we always want to return to userspace,
-	 * regardless of event count.
-	 */
-	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
-}
-
 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 			    int wake_flags, void *key)
 {

io_uring/io_uring.h

Lines changed: 22 additions & 0 deletions
@@ -35,6 +35,28 @@ enum {
 	IOU_STOP_MULTISHOT = -ECANCELED,
 };
 
+struct io_wait_queue {
+	struct wait_queue_entry wq;
+	struct io_ring_ctx *ctx;
+	unsigned cq_tail;
+	unsigned nr_timeouts;
+	ktime_t timeout;
+
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+	struct io_ring_ctx *ctx = iowq->ctx;
+	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+
+	/*
+	 * Wake up if we have enough events, or if a timeout occurred since we
+	 * started waiting. For timeouts, we always want to return to userspace,
+	 * regardless of event count.
+	 */
+	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
+
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
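One detail worth noting in the moved io_should_wake(): cq.tail and cq_tail are free-running 32-bit counters, so after wraparound the target tail can be numerically smaller than the current tail. Subtracting in unsigned arithmetic and then casting to int yields a wraparound-safe signed distance. A minimal standalone sketch of the idiom (userspace C with illustrative names, not kernel code):

#include <assert.h>

/* Wraparound-safe "has cur advanced to target?" check, mirroring the
 * dist computation in io_should_wake(). Both counters are free-running
 * 32-bit values that may wrap past 0xffffffff. */
static int reached_target(unsigned int cur, unsigned int target)
{
	/* Unsigned subtraction wraps modulo 2^32; the cast to int turns
	 * the result into a signed distance, as in the kernel code. */
	int dist = (int)(cur - target);

	return dist >= 0;
}

int main(void)
{
	unsigned int target = 0xfffffffeu + 4;	/* wait for 4 CQEs: wraps to 2 */

	assert(!reached_target(0xffffffffu, target));	/* still 3 short */
	assert(reached_target(2u, target));		/* target reached */
	assert(reached_target(5u, target));		/* past target */
	return 0;
}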
