Skip to content

Commit 7387943

Browse files
zx2c4 authored and davem330 committed
wireguard: queueing: use saner cpu selection wrapping
Using `% nr_cpumask_bits` is slow and complicated, and not totally robust toward dynamic changes to CPU topologies. Rather than storing the next CPU in the round-robin, just store the last one, and also return that value. This simplifies the loop drastically into a much more common pattern. Fixes: e7096c1 ("net: WireGuard secure network tunnel") Cc: [email protected] Reported-by: Linus Torvalds <[email protected]> Tested-by: Manuel Leiner <[email protected]> Signed-off-by: Jason A. Donenfeld <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent a27ac53 commit 7387943

File tree

4 files changed

+14
-16
lines changed

4 files changed

+14
-16
lines changed

drivers/net/wireguard/queueing.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
2828
int ret;
2929

3030
memset(queue, 0, sizeof(*queue));
31+
queue->last_cpu = -1;
3132
ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
3233
if (ret)
3334
return ret;

drivers/net/wireguard/queueing.h

Lines changed: 11 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -117,20 +117,17 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
117117
return cpu;
118118
}
119119

120-
/* This function is racy, in the sense that next is unlocked, so it could return
121-
* the same CPU twice. A race-free version of this would be to instead store an
122-
* atomic sequence number, do an increment-and-return, and then iterate through
123-
* every possible CPU until we get to that index -- choose_cpu. However that's
124-
* a bit slower, and it doesn't seem like this potential race actually
125-
* introduces any performance loss, so we live with it.
120+
/* This function is racy, in the sense that it's called while last_cpu is
121+
* unlocked, so it could return the same CPU twice. Adding locking or using
122+
* atomic sequence numbers is slower though, and the consequences of racing are
123+
* harmless, so live with it.
126124
*/
127-
static inline int wg_cpumask_next_online(int *next)
125+
static inline int wg_cpumask_next_online(int *last_cpu)
128126
{
129-
int cpu = *next;
130-
131-
while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
132-
cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
133-
*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
127+
int cpu = cpumask_next(*last_cpu, cpu_online_mask);
128+
if (cpu >= nr_cpu_ids)
129+
cpu = cpumask_first(cpu_online_mask);
130+
*last_cpu = cpu;
134131
return cpu;
135132
}
136133

@@ -159,7 +156,7 @@ static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
159156

160157
static inline int wg_queue_enqueue_per_device_and_peer(
161158
struct crypt_queue *device_queue, struct prev_queue *peer_queue,
162-
struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
159+
struct sk_buff *skb, struct workqueue_struct *wq)
163160
{
164161
int cpu;
165162

@@ -173,7 +170,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
173170
/* Then we queue it up in the device queue, which consumes the
174171
* packet as soon as it can.
175172
*/
176-
cpu = wg_cpumask_next_online(next_cpu);
173+
cpu = wg_cpumask_next_online(&device_queue->last_cpu);
177174
if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
178175
return -EPIPE;
179176
queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);

drivers/net/wireguard/receive.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -524,7 +524,7 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
524524
goto err;
525525

526526
ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
527-
wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
527+
wg->packet_crypt_wq);
528528
if (unlikely(ret == -EPIPE))
529529
wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
530530
if (likely(!ret || ret == -EPIPE)) {

drivers/net/wireguard/send.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -318,7 +318,7 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
318318
goto err;
319319

320320
ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
321-
wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
321+
wg->packet_crypt_wq);
322322
if (unlikely(ret == -EPIPE))
323323
wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
324324
err:

0 commit comments

Comments (0)