Skip to content

Commit c84d949

Browse files
Eric Dumazet authored and davem330 committed
udp: copy skb->truesize in the first cache line
In UDP RX handler, we currently clear skb->dev before skb is added to receive queue, because device pointer is no longer available once we exit from RCU section. Since this first cache line is always hot, let's reuse this space to store skb->truesize and thus avoid a cache line miss at udp_recvmsg()/udp_skb_destructor time while receive queue spinlock is held. Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 4b27275 commit c84d949

File tree

2 files changed

+18
-4
lines changed

2 files changed

+18
-4
lines changed

include/linux/skbuff.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -645,8 +645,15 @@ struct sk_buff {
645645
struct rb_node rbnode; /* used in netem & tcp stack */
646646
};
647647
struct sock *sk;
648-
struct net_device *dev;
649648

649+
union {
650+
struct net_device *dev;
651+
/* Some protocols might use this space to store information,
652+
* while device pointer would be NULL.
653+
* UDP receive path is one user.
654+
*/
655+
unsigned long dev_scratch;
656+
};
650657
/*
651658
* This is the control buffer. It is free to use for every
652659
* layer. Please put your private variables there. If you

net/ipv4/udp.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1188,10 +1188,14 @@ static void udp_rmem_release(struct sock *sk, int size, int partial)
11881188
__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
11891189
}
11901190

1191-
/* Note: called with sk_receive_queue.lock held */
1191+
/* Note: called with sk_receive_queue.lock held.
1192+
* Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
1193+
* This avoids a cache line miss while receive_queue lock is held.
1194+
* Look at __udp_enqueue_schedule_skb() to find where this copy is done.
1195+
*/
11921196
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
11931197
{
1194-
udp_rmem_release(sk, skb->truesize, 1);
1198+
udp_rmem_release(sk, skb->dev_scratch, 1);
11951199
}
11961200
EXPORT_SYMBOL(udp_skb_destructor);
11971201

@@ -1246,6 +1250,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
12461250
busy = busylock_acquire(sk);
12471251
}
12481252
size = skb->truesize;
1253+
/* Copy skb->truesize into skb->dev_scratch to avoid a cache line miss
1254+
* in udp_skb_destructor()
1255+
*/
1256+
skb->dev_scratch = size;
12491257

12501258
/* we drop only if the receive buf is full and the receive
12511259
* queue contains some other skb
@@ -1272,7 +1280,6 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
12721280
/* no need to setup a destructor, we will explicitly release the
12731281
* forward allocated memory on dequeue
12741282
*/
1275-
skb->dev = NULL;
12761283
sock_skb_set_dropcount(sk, skb);
12771284

12781285
__skb_queue_tail(list, skb);

0 commit comments

Comments
 (0)