
Commit 8d0c12a

Stefan Roesch authored and axboe committed
io-uring: add napi busy poll support
This adds napi busy polling support in io_uring.c. It adds a new napi_list to the io_ring_ctx structure. This list contains the napi_id's that are currently enabled for busy polling. The list is synchronized by the new napi_lock spinlock.

The current default napi busy polling time is stored in napi_busy_poll_to. If napi busy polling is not enabled, the value is 0.

In addition there is also a hash table. The hash table stores the napi id and a pointer to the corresponding list node, and is used to speed up lookups into the list. The hash table is synchronized with RCU.

NAPI_TIMEOUT is stored as a timeout to limit how long a napi entry stays in the napi list.

The busy poll timeout is also stored as part of the io_wait_queue. This is necessary because for sq polling the poll interval needs to be adjusted, and the napi callback only allows a single value to be passed in.

This has been tested with two simple programs from the liburing library repository: the napi client and the napi server program. The client sends a request with a timestamp in its payload, and the server replies with the same payload. The client calculates the roundtrip time and stores it to calculate the results.

The client runs on host 1 and the server on host 2 (in the same rack). The measured times below are roundtrip times, averaged over 5 runs of 1 million roundtrips each.

                                    no rx coal   rx coal: frames=88,usecs=33
Default                             57us         56us

client_poll=100us                   47us         46us

server_poll=100us                   51us         46us

client_poll=100us +                 40us         40us
server_poll=100us

client_poll=100us +                 41us         39us
server_poll=100us +
prefer napi busy poll on client

client_poll=100us +                 41us         39us
server_poll=100us +
prefer napi busy poll on server

client_poll=100us +                 41us         39us
server_poll=100us +
prefer napi busy poll on client + server

Signed-off-by: Stefan Roesch <[email protected]>
Suggested-by: Olivier Langlois <[email protected]>
Acked-by: Jakub Kicinski <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
1 parent 405b4dc commit 8d0c12a
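Note on the measurement methodology described in the commit message: the actual test programs are the napi client and server in the liburing repository, which submit the I/O through io_uring. The sketch below is only a simplified, hypothetical stand-in using plain blocking UDP sockets and made-up address/port values, to illustrate how the roundtrip numbers in the table are obtained (a timestamp goes out in the payload, the echo comes back, the difference is the RTT); error handling is omitted for brevity.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in srv = {
		.sin_family = AF_INET,
		.sin_port = htons(9999),		/* hypothetical port */
	};
	struct timespec sent, recvd;
	char buf[sizeof(sent)];
	long long total_ns = 0;
	int i, rounds = 1000000;			/* 1 million roundtrips per run */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	inet_pton(AF_INET, "192.0.2.1", &srv.sin_addr);	/* hypothetical server host */
	connect(fd, (struct sockaddr *)&srv, sizeof(srv));

	for (i = 0; i < rounds; i++) {
		clock_gettime(CLOCK_MONOTONIC, &sent);
		memcpy(buf, &sent, sizeof(sent));
		send(fd, buf, sizeof(buf), 0);		/* request carries the timestamp */
		recv(fd, buf, sizeof(buf), 0);		/* server echoes the payload */
		clock_gettime(CLOCK_MONOTONIC, &recvd);

		total_ns += (recvd.tv_sec - sent.tv_sec) * 1000000000LL +
			    (recvd.tv_nsec - sent.tv_nsec);
	}

	printf("avg rtt: %lld ns\n", total_ns / rounds);
	close(fd);
	return 0;
}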

7 files changed: +373 −1 lines changed


include/linux/io_uring_types.h

Lines changed: 14 additions & 1 deletion
@@ -2,6 +2,7 @@
 #define IO_URING_TYPES_H
 
 #include <linux/blkdev.h>
+#include <linux/hashtable.h>
 #include <linux/task_work.h>
 #include <linux/bitmap.h>
 #include <linux/llist.h>
@@ -247,6 +248,7 @@ struct io_ring_ctx {
 	struct percpu_ref		refs;
 
 	enum task_work_notify_mode	notify_method;
+	unsigned			sq_thread_idle;
 } ____cacheline_aligned_in_smp;
 
 	/* submission data */
@@ -410,7 +412,18 @@ struct io_ring_ctx {
 
 	struct callback_head		poll_wq_task_work;
 	struct list_head		defer_list;
-	unsigned			sq_thread_idle;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	struct list_head	napi_list;	/* track busy poll napi_id */
+	spinlock_t		napi_lock;	/* napi_list lock */
+
+	/* napi busy poll default timeout */
+	unsigned int		napi_busy_poll_to;
+	bool			napi_prefer_busy_poll;
+
+	DECLARE_HASHTABLE(napi_ht, 4);
+#endif
+
 	/* protected by ->completion_lock */
 	unsigned		evfd_last_cq_tail;
 
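Aside (not part of the patch): DECLARE_HASHTABLE(napi_ht, 4) from <linux/hashtable.h> declares a fixed array of 2^4 = 16 hlist_head buckets, and napi.c below indexes it with hash_min(napi_id, HASH_BITS(ctx->napi_ht)). A minimal illustrative sketch of that bucket selection, with hypothetical names:

#include <linux/hashtable.h>

/* hypothetical container, mirroring the napi_ht member added above */
struct example_ctx {
	DECLARE_HASHTABLE(napi_ht, 4);		/* expands to 16 hlist_head buckets */
};

static struct hlist_head *example_napi_bucket(struct example_ctx *ctx,
					      unsigned int napi_id)
{
	/* hash_min() folds napi_id down to HASH_BITS() == 4 bits: bucket 0..15 */
	return &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
}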

io_uring/Makefile

Lines changed: 1 addition & 0 deletions
@@ -11,3 +11,4 @@ obj-$(CONFIG_IO_URING) += io_uring.o xattr.o nop.o fs.o splice.o \
 					notif.o waitid.o register.o truncate.o
 obj-$(CONFIG_IO_WQ) += io-wq.o
 obj-$(CONFIG_FUTEX) += futex.o
+obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o

io_uring/io_uring.c

Lines changed: 8 additions & 0 deletions
@@ -95,6 +95,7 @@
 #include "notif.h"
 #include "waitid.h"
 #include "futex.h"
+#include "napi.h"
 
 #include "timeout.h"
 #include "poll.h"
@@ -349,6 +350,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
 	INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
+	io_napi_init(ctx);
+
 	return ctx;
 err:
 	kfree(ctx->cancel_table.hbs);
@@ -2602,9 +2605,13 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 		if (get_timespec64(&ts, uts))
 			return -EFAULT;
+
 		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+		io_napi_adjust_timeout(ctx, &iowq, &ts);
 	}
 
+	io_napi_busy_loop(ctx, &iowq);
+
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
 		int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
@@ -2897,6 +2904,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_req_caches_free(ctx);
 	if (ctx->hash_map)
 		io_wq_put_hash(ctx->hash_map);
+	io_napi_free(ctx);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
 	kfree(ctx->io_bl);

io_uring/io_uring.h

Lines changed: 4 additions & 0 deletions
@@ -42,6 +42,10 @@ struct io_wait_queue {
 	unsigned nr_timeouts;
 	ktime_t timeout;
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int napi_busy_poll_to;
+	bool napi_prefer_busy_poll;
+#endif
 };
 
 static inline bool io_should_wake(struct io_wait_queue *iowq)
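io_uring/napi.h is among the seven changed files but is not shown in this view. Based only on the calls made from io_uring.c above (io_napi_init/io_napi_free, io_napi_adjust_timeout, io_napi_busy_loop) and the double-underscore implementations in napi.c below, a plausible sketch of what the header provides might look like the following; the io_napi() helper name, the include set, the guard macro, and the !CONFIG_NET_RX_BUSY_POLL stubs are assumptions, not content taken from this page.

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOU_NAPI_H
#define IOU_NAPI_H

#include <linux/kernel.h>
#include <linux/io_uring.h>
#include <net/busy_poll.h>

#ifdef CONFIG_NET_RX_BUSY_POLL

void io_napi_init(struct io_ring_ctx *ctx);
void io_napi_free(struct io_ring_ctx *ctx);

void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock);
void __io_napi_adjust_timeout(struct io_ring_ctx *ctx,
			      struct io_wait_queue *iowq, struct timespec64 *ts);
void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);

/* assumed helper: skip the work when no napi id has been recorded yet */
static inline bool io_napi(struct io_ring_ctx *ctx)
{
	return !list_empty(&ctx->napi_list);
}

static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  struct timespec64 *ts)
{
	if (io_napi(ctx))
		__io_napi_adjust_timeout(ctx, iowq, ts);
}

static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
				     struct io_wait_queue *iowq)
{
	if (io_napi(ctx))
		__io_napi_busy_loop(ctx, iowq);
}

#else /* !CONFIG_NET_RX_BUSY_POLL */

static inline void io_napi_init(struct io_ring_ctx *ctx) { }
static inline void io_napi_free(struct io_ring_ctx *ctx) { }
static inline void io_napi_adjust_timeout(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  struct timespec64 *ts) { }
static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
				     struct io_wait_queue *iowq) { }

#endif /* CONFIG_NET_RX_BUSY_POLL */
#endif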

io_uring/napi.c

Lines changed: 255 additions & 0 deletions
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "io_uring.h"
+#include "napi.h"
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+/* Timeout for cleanout of stale entries. */
+#define NAPI_TIMEOUT		(60 * SEC_CONVERSION)
+
+struct io_napi_entry {
+	unsigned int		napi_id;
+	struct list_head	list;
+
+	unsigned long		timeout;
+	struct hlist_node	node;
+
+	struct rcu_head		rcu;
+};
+
+static struct io_napi_entry *io_napi_hash_find(struct hlist_head *hash_list,
+					       unsigned int napi_id)
+{
+	struct io_napi_entry *e;
+
+	hlist_for_each_entry_rcu(e, hash_list, node) {
+		if (e->napi_id != napi_id)
+			continue;
+		e->timeout = jiffies + NAPI_TIMEOUT;
+		return e;
+	}
+
+	return NULL;
+}
+
+void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock)
+{
+	struct hlist_head *hash_list;
+	unsigned int napi_id;
+	struct sock *sk;
+	struct io_napi_entry *e;
+
+	sk = sock->sk;
+	if (!sk)
+		return;
+
+	napi_id = READ_ONCE(sk->sk_napi_id);
+
+	/* Non-NAPI IDs can be rejected. */
+	if (napi_id < MIN_NAPI_ID)
+		return;
+
+	hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
+
+	rcu_read_lock();
+	e = io_napi_hash_find(hash_list, napi_id);
+	if (e) {
+		e->timeout = jiffies + NAPI_TIMEOUT;
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	e = kmalloc(sizeof(*e), GFP_NOWAIT);
+	if (!e)
+		return;
+
+	e->napi_id = napi_id;
+	e->timeout = jiffies + NAPI_TIMEOUT;
+
+	spin_lock(&ctx->napi_lock);
+	if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
+		spin_unlock(&ctx->napi_lock);
+		kfree(e);
+		return;
+	}
+
+	hlist_add_tail_rcu(&e->node, hash_list);
+	list_add_tail(&e->list, &ctx->napi_list);
+	spin_unlock(&ctx->napi_lock);
+}
+
+static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
+{
+	struct io_napi_entry *e;
+	unsigned int i;
+
+	spin_lock(&ctx->napi_lock);
+	hash_for_each(ctx->napi_ht, i, e, node) {
+		if (time_after(jiffies, e->timeout)) {
+			list_del(&e->list);
+			hash_del_rcu(&e->node);
+			kfree_rcu(e, rcu);
+		}
+	}
+	spin_unlock(&ctx->napi_lock);
+}
+
+static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
+{
+	if (is_stale)
+		__io_napi_remove_stale(ctx);
+}
+
+static inline bool io_napi_busy_loop_timeout(unsigned long start_time,
+					     unsigned long bp_usec)
+{
+	if (bp_usec) {
+		unsigned long end_time = start_time + bp_usec;
+		unsigned long now = busy_loop_current_time();
+
+		return time_after(now, end_time);
+	}
+
+	return true;
+}
+
+static bool io_napi_busy_loop_should_end(void *data,
+					 unsigned long start_time)
+{
+	struct io_wait_queue *iowq = data;
+
+	if (signal_pending(current))
+		return true;
+	if (io_should_wake(iowq))
+		return true;
+	if (io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to))
+		return true;
+
+	return false;
+}
+
+static bool __io_napi_do_busy_loop(struct io_ring_ctx *ctx,
+				   void *loop_end_arg)
+{
+	struct io_napi_entry *e;
+	bool (*loop_end)(void *, unsigned long) = NULL;
+	bool is_stale = false;
+
+	if (loop_end_arg)
+		loop_end = io_napi_busy_loop_should_end;
+
+	list_for_each_entry_rcu(e, &ctx->napi_list, list) {
+		napi_busy_loop_rcu(e->napi_id, loop_end, loop_end_arg,
+				   ctx->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
+
+		if (time_after(jiffies, e->timeout))
+			is_stale = true;
+	}
+
+	return is_stale;
+}
+
+static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
+				       struct io_wait_queue *iowq)
+{
+	unsigned long start_time = busy_loop_current_time();
+	void *loop_end_arg = NULL;
+	bool is_stale = false;
+
+	/* Singular lists use a different napi loop end check function and are
+	 * only executed once.
+	 */
+	if (list_is_singular(&ctx->napi_list))
+		loop_end_arg = iowq;
+
+	rcu_read_lock();
+	do {
+		is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
+	} while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
+	rcu_read_unlock();
+
+	io_napi_remove_stale(ctx, is_stale);
+}
+
+/*
+ * io_napi_init() - Init napi settings
+ * @ctx: pointer to io-uring context structure
+ *
+ * Init napi settings in the io-uring context.
+ */
+void io_napi_init(struct io_ring_ctx *ctx)
+{
+	INIT_LIST_HEAD(&ctx->napi_list);
+	spin_lock_init(&ctx->napi_lock);
+	ctx->napi_prefer_busy_poll = false;
+	ctx->napi_busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
+}
+
+/*
+ * io_napi_free() - Deallocate napi
+ * @ctx: pointer to io-uring context structure
+ *
+ * Free the napi list and the hash table in the io-uring context.
+ */
+void io_napi_free(struct io_ring_ctx *ctx)
+{
+	struct io_napi_entry *e;
+	LIST_HEAD(napi_list);
+	unsigned int i;
+
+	spin_lock(&ctx->napi_lock);
+	hash_for_each(ctx->napi_ht, i, e, node) {
+		hash_del_rcu(&e->node);
+		kfree_rcu(e, rcu);
+	}
+	spin_unlock(&ctx->napi_lock);
+}
+
+/*
+ * __io_napi_adjust_timeout() - Add napi id to the busy poll list
+ * @ctx: pointer to io-uring context structure
+ * @iowq: pointer to io wait queue
+ * @ts: pointer to timespec or NULL
+ *
+ * Adjust the busy loop timeout according to timespec and busy poll timeout.
+ */
+void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
+			      struct timespec64 *ts)
+{
+	unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
+
+	if (ts) {
+		struct timespec64 poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
+
+		if (timespec64_compare(ts, &poll_to_ts) > 0) {
+			*ts = timespec64_sub(*ts, poll_to_ts);
+		} else {
+			u64 to = timespec64_to_ns(ts);
+
+			do_div(to, 1000);
+			ts->tv_sec = 0;
+			ts->tv_nsec = 0;
+		}
+	}
+
+	iowq->napi_busy_poll_to = poll_to;
+}
+
+/*
+ * __io_napi_busy_loop() - execute busy poll loop
+ * @ctx: pointer to io-uring context structure
+ * @iowq: pointer to io wait queue
+ *
+ * Execute the busy poll loop and merge the spliced off list.
+ */
+void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
+{
+	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
+
+	if (!(ctx->flags & IORING_SETUP_SQPOLL) && iowq->napi_busy_poll_to)
+		io_napi_blocking_busy_loop(ctx, iowq);
+}
+
+#endif
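For illustration only: the following hedged, userspace-only sketch mirrors the arithmetic performed by __io_napi_adjust_timeout() above, where the busy poll budget (in microseconds) is carved out of the caller's wait timeout so that busy polling plus the remaining sleep roughly stays within the original budget. The function and type names here are invented for the example and are not part of the patch.

#include <stdio.h>

struct example_timeout {
	long long sec;
	long long nsec;
};

static unsigned int example_adjust(struct example_timeout *ts,
				   unsigned int poll_to_usec)
{
	long long poll_to_ns = 1000LL * poll_to_usec;
	long long wait_ns = ts->sec * 1000000000LL + ts->nsec;

	if (wait_ns > poll_to_ns)
		wait_ns -= poll_to_ns;	/* sleep only for what is left after busy polling */
	else
		wait_ns = 0;		/* wait budget is smaller than the poll budget: no sleep */

	ts->sec = wait_ns / 1000000000LL;
	ts->nsec = wait_ns % 1000000000LL;
	return poll_to_usec;		/* corresponds to iowq->napi_busy_poll_to */
}

int main(void)
{
	/* e.g. a 1 ms wait with a 100 us busy poll budget leaves 900 us of sleep */
	struct example_timeout ts = { .sec = 0, .nsec = 1000000 };
	unsigned int busy_poll_to = example_adjust(&ts, 100);

	printf("busy poll %u us, then sleep %lld ns\n", busy_poll_to, ts.nsec);
	return 0;
}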
