1 file changed: +6 −5 lines changed

@@ -1492,13 +1492,14 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	int rmem, err = -ENOMEM;
 	spinlock_t *busy = NULL;
-	int size;
+	int size, rcvbuf;

-	/* try to avoid the costly atomic add/sub pair when the receive
-	 * queue is full; always allow at least a packet
+	/* Immediately drop when the receive queue is full.
+	 * Always allow at least one packet.
 	 */
 	rmem = atomic_read(&sk->sk_rmem_alloc);
-	if (rmem > sk->sk_rcvbuf)
+	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+	if (rmem > rcvbuf)
 		goto drop;

 	/* Under mem pressure, it might be helpful to help udp_recvmsg()
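The core of this hunk: sk->sk_rcvbuf can be rewritten at any time by a concurrent setsockopt(SO_RCVBUF), so the function now reads it exactly once into a local and uses that snapshot for every later comparison. A minimal sketch of the pattern, with hypothetical names (not the actual kernel code):

	/* Lockless-snapshot sketch, kernel-style C. Assumes a limit field
	 * that another task may update concurrently with WRITE_ONCE().
	 */
	#include <linux/atomic.h>
	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <linux/errno.h>

	struct rx_queue {
		atomic_t alloc;		/* bytes currently queued */
		int limit;		/* updated concurrently by writers */
	};

	static int rx_enqueue(struct rx_queue *q)
	{
		/* Snapshot once: re-reading q->limit for the second check
		 * could observe a different value mid-function (data race /
		 * TOCTOU), and a plain access also lets the compiler reload
		 * the field at will.
		 */
		int limit = READ_ONCE(q->limit);
		int alloc = atomic_read(&q->alloc);

		if (alloc > limit)		/* queue full: drop early */
			return -ENOMEM;
		if (alloc > (limit >> 1))	/* over half full */
			;			/* e.g. condense the buffer */
		return 0;
	}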
@@ -1507,7 +1508,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	 * - Less cache line misses at copyout() time
 	 * - Less work at consume_skb() (less alien page frag freeing)
 	 */
-	if (rmem > (sk->sk_rcvbuf >> 1)) {
+	if (rmem > (rcvbuf >> 1)) {
 		skb_condense(skb);

 		busy = busylock_acquire(sk);
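For reference, the concurrent writer this guards against can simply be user space resizing the buffer while packets are arriving; a hedged userspace example using the standard sockets API:

	#include <sys/socket.h>

	/* Shrink a socket's receive buffer on the fly; the kernel clamps
	 * and doubles the requested value. Racing with the enqueue path
	 * above is why sk_rcvbuf is read there via READ_ONCE().
	 */
	static int shrink_rcvbuf(int fd)
	{
		int val = 4096;

		return setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	}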