@@ -270,8 +270,11 @@ static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
 }
 #endif
 
-static int packet_direct_xmit(struct sk_buff *skb)
+static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
 {
+	if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
+		return dev_queue_xmit(skb);
+
 #ifdef CONFIG_NETFILTER_EGRESS
 	if (nf_hook_egress_active()) {
 		skb = nf_hook_direct_egress(skb);
@@ -305,12 +308,6 @@ static void packet_cached_dev_reset(struct packet_sock *po)
 	RCU_INIT_POINTER(po->cached_dev, NULL);
 }
 
-static bool packet_use_direct_xmit(const struct packet_sock *po)
-{
-	/* Paired with WRITE_ONCE() in packet_setsockopt() */
-	return READ_ONCE(po->xmit) == packet_direct_xmit;
-}
-
 static u16 packet_pick_tx_queue(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
@@ -2872,8 +2869,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	packet_inc_pending(&po->tx_ring);
 
 	status = TP_STATUS_SEND_REQUEST;
-	/* Paired with WRITE_ONCE() in packet_setsockopt() */
-	err = READ_ONCE(po->xmit)(skb);
+	err = packet_xmit(po, skb);
 	if (unlikely(err != 0)) {
 		if (err > 0)
 			err = net_xmit_errno(err);
@@ -3076,8 +3072,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
 	}
 
-	/* Paired with WRITE_ONCE() in packet_setsockopt() */
-	err = READ_ONCE(po->xmit)(skb);
+	err = packet_xmit(po, skb);
+
 	if (unlikely(err != 0)) {
 		if (err > 0)
 			err = net_xmit_errno(err);
@@ -3359,7 +3355,6 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
 	init_completion(&po->skb_completion);
 	sk->sk_family = PF_PACKET;
 	po->num = proto;
-	po->xmit = dev_queue_xmit;
 
 	err = packet_alloc_pending(po);
 	if (err)
@@ -4010,8 +4005,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
 		if (copy_from_sockptr(&val, optval, sizeof(val)))
 			return -EFAULT;
 
-		/* Paired with all lockless reads of po->xmit */
-		WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
+		packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
 		return 0;
 	}
 	default:
@@ -4126,7 +4120,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 		val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
 		break;
 	case PACKET_QDISC_BYPASS:
-		val = packet_use_direct_xmit(po);
+		val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
 		break;
 	default:
 		return -ENOPROTOOPT;
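For context, PACKET_QDISC_BYPASS is the userspace-visible socket option whose kernel-side storage this patch moves from the po->xmit function pointer into a packet_sock_flag bit. A minimal userspace sketch of setting and reading the option back (not part of this patch; assumes an AF_PACKET socket and CAP_NET_RAW, with error handling kept deliberately short) might look like:

/*
 * Hypothetical example, not from this commit: toggle and query
 * PACKET_QDISC_BYPASS on an AF_PACKET socket.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int val = 1;
	socklen_t len = sizeof(val);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Ask the kernel to bypass the qdisc layer on transmit. */
	if (setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &val, sizeof(val)) < 0)
		perror("setsockopt(PACKET_QDISC_BYPASS)");

	/* Read the flag back; after this patch it is served from the flag bit. */
	if (getsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &val, &len) == 0)
		printf("PACKET_QDISC_BYPASS = %d\n", val);

	return 0;
}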