Skip to content

Commit f6d8cb2

Browse files
edumazet
authored and davem330 committed
inet: reduce TLB pressure for listeners
It seems overkill to use vmalloc() for typical listeners with less than 2048 hash buckets. Try kmalloc() and fall back to vmalloc() to reduce TLB pressure. Use the kvfree() helper as it is now available. Use ilog2() instead of a loop. Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent bb446c1 commit f6d8cb2

File tree

1 file changed

+12
-31
lines changed

1 file changed

+12
-31
lines changed

net/core/request_sock.c

Lines changed: 12 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -41,27 +41,27 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
4141
unsigned int nr_table_entries)
4242
{
4343
size_t lopt_size = sizeof(struct listen_sock);
44-
struct listen_sock *lopt;
44+
struct listen_sock *lopt = NULL;
4545

4646
nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
4747
nr_table_entries = max_t(u32, nr_table_entries, 8);
4848
nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
4949
lopt_size += nr_table_entries * sizeof(struct request_sock *);
50-
if (lopt_size > PAGE_SIZE)
50+
51+
if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
52+
lopt = kzalloc(lopt_size, GFP_KERNEL |
53+
__GFP_NOWARN |
54+
__GFP_NORETRY);
55+
if (!lopt)
5156
lopt = vzalloc(lopt_size);
52-
else
53-
lopt = kzalloc(lopt_size, GFP_KERNEL);
54-
if (lopt == NULL)
57+
if (!lopt)
5558
return -ENOMEM;
5659

57-
for (lopt->max_qlen_log = 3;
58-
(1 << lopt->max_qlen_log) < nr_table_entries;
59-
lopt->max_qlen_log++);
60-
6160
get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
6261
rwlock_init(&queue->syn_wait_lock);
6362
queue->rskq_accept_head = NULL;
6463
lopt->nr_table_entries = nr_table_entries;
64+
lopt->max_qlen_log = ilog2(nr_table_entries);
6565

6666
write_lock_bh(&queue->syn_wait_lock);
6767
queue->listen_opt = lopt;
@@ -72,22 +72,8 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
7272

7373
void __reqsk_queue_destroy(struct request_sock_queue *queue)
7474
{
75-
struct listen_sock *lopt;
76-
size_t lopt_size;
77-
78-
/*
79-
* this is an error recovery path only
80-
* no locking needed and the lopt is not NULL
81-
*/
82-
83-
lopt = queue->listen_opt;
84-
lopt_size = sizeof(struct listen_sock) +
85-
lopt->nr_table_entries * sizeof(struct request_sock *);
86-
87-
if (lopt_size > PAGE_SIZE)
88-
vfree(lopt);
89-
else
90-
kfree(lopt);
75+
/* This is an error recovery path only, no locking needed */
76+
kvfree(queue->listen_opt);
9177
}
9278

9379
static inline struct listen_sock *reqsk_queue_yank_listen_sk(
@@ -107,8 +93,6 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
10793
{
10894
/* make all the listen_opt local to us */
10995
struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
110-
size_t lopt_size = sizeof(struct listen_sock) +
111-
lopt->nr_table_entries * sizeof(struct request_sock *);
11296

11397
if (lopt->qlen != 0) {
11498
unsigned int i;
@@ -125,10 +109,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
125109
}
126110

127111
WARN_ON(lopt->qlen != 0);
128-
if (lopt_size > PAGE_SIZE)
129-
vfree(lopt);
130-
else
131-
kfree(lopt);
112+
kvfree(lopt);
132113
}
133114

134115
/*

0 commit comments

Comments (0)