@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/average.h>
+#include <linux/filter.h>
 #include <net/route.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
@@ -372,8 +373,20 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	return skb;
 }
 
-static bool virtnet_xdp_xmit(struct virtnet_info *vi,
-			     struct xdp_buff *xdp)
+static void virtnet_xdp_flush(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct send_queue *sq;
+	unsigned int qp;
+
+	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+	sq = &vi->sq[qp];
+
+	virtqueue_kick(sq->vq);
+}
+
+static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
+			       struct xdp_buff *xdp)
 {
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	unsigned int len;
@@ -407,10 +420,19 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
 		return false;
 	}
 
-	virtqueue_kick(sq->vq);
 	return true;
 }
 
+static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	bool sent = __virtnet_xdp_xmit(vi, xdp);
+
+	if (!sent)
+		return -ENOSPC;
+	return 0;
+}
+
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
 {
 	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
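The split above separates per-frame enqueue from the doorbell: __virtnet_xdp_xmit() only places the buffer on the XDP send queue, while virtnet_xdp_flush() kicks the virtqueue once for a whole batch. A minimal sketch of how a redirect caller is expected to drive the two new hooks (the helper name and the one-frame-then-flush pattern are illustrative; the real caller is the XDP redirect core, which batches many xmits before one flush):

    /* Illustrative only: queue one frame to a virtio-net device and
     * then kick its virtqueue via the flush hook. */
    static int example_xmit_one(struct net_device *dev, struct xdp_buff *xdp)
    {
    	int err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
    
    	if (err)
    		return err;		/* -ENOSPC when the TX ring is full */
    	dev->netdev_ops->ndo_xdp_flush(dev);	/* virtqueue_kick() */
    	return 0;
    }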
@@ -483,7 +505,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
 				     struct virtnet_info *vi,
 				     struct receive_queue *rq,
 				     void *buf, void *ctx,
-				     unsigned int len)
+				     unsigned int len,
+				     bool *xdp_xmit)
 {
 	struct sk_buff *skb;
 	struct bpf_prog *xdp_prog;
@@ -493,7 +516,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	struct page *page = virt_to_head_page(buf);
-	unsigned int delta = 0;
+	unsigned int delta = 0, err;
 	struct page *xdp_page;
 	len -= vi->hdr_len;
 
@@ -541,8 +564,16 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		delta = orig_data - xdp.data;
 		break;
 	case XDP_TX:
-		if (unlikely(!virtnet_xdp_xmit(vi, &xdp)))
+		if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
 			trace_xdp_exception(vi->dev, xdp_prog, act);
+		else
+			*xdp_xmit = true;
+		rcu_read_unlock();
+		goto xdp_xmit;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(dev, &xdp, xdp_prog);
+		if (!err)
+			*xdp_xmit = true;
 		rcu_read_unlock();
 		goto xdp_xmit;
 	default:
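For context, XDP_REDIRECT is the verdict a BPF program returns after calling the bpf_redirect() (or bpf_redirect_map()) helper; xdp_do_redirect() then hands the frame to the chosen device. A minimal program that would exercise the new arm might look like the sketch below; the target ifindex (5) and section name are placeholders, and the header paths assume libbpf-style helpers:

    /* Illustrative only: an XDP program whose verdict drives the
     * XDP_REDIRECT case added above. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    
    SEC("xdp")
    int redirect_prog(struct xdp_md *ctx)
    {
    	return bpf_redirect(5, 0);	/* flags must currently be 0 */
    }
    
    char _license[] SEC("license") = "GPL";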
@@ -603,7 +634,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct receive_queue *rq,
 					 void *buf,
 					 void *ctx,
-					 unsigned int len)
+					 unsigned int len,
+					 bool *xdp_xmit)
 {
 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
 	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -613,6 +645,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	struct bpf_prog *xdp_prog;
 	unsigned int truesize;
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
+	int err;
 
 	head_skb = NULL;
 
@@ -678,12 +711,20 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		}
 		break;
 	case XDP_TX:
-		if (unlikely(!virtnet_xdp_xmit(vi, &xdp)))
+		if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
 			trace_xdp_exception(vi->dev, xdp_prog, act);
+		else
+			*xdp_xmit = true;
 		if (unlikely(xdp_page != page))
 			goto err_xdp;
 		rcu_read_unlock();
 		goto xdp_xmit;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(dev, &xdp, xdp_prog);
+		if (!err)
+			*xdp_xmit = true;
+		rcu_read_unlock();
+		goto xdp_xmit;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
@@ -788,7 +829,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 }
 
 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-		       void *buf, unsigned int len, void **ctx)
+		       void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
 {
 	struct net_device *dev = vi->dev;
 	struct sk_buff *skb;
@@ -809,11 +850,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 	}
 
 	if (vi->mergeable_rx_bufs)
-		skb = receive_mergeable(dev, vi, rq, buf, ctx, len);
+		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
 	else if (vi->big_packets)
 		skb = receive_big(dev, vi, rq, buf, len);
 	else
-		skb = receive_small(dev, vi, rq, buf, ctx, len);
+		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
 
 	if (unlikely(!skb))
 		return 0;
@@ -1071,7 +1112,7 @@ static void refill_work(struct work_struct *work)
 	}
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget)
+static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	unsigned int len, received = 0, bytes = 0;
@@ -1083,13 +1124,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
 
 		while (received < budget &&
 		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
-			bytes += receive_buf(vi, rq, buf, len, ctx);
+			bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
 			received++;
 		}
 	} else {
 		while (received < budget &&
 		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-			bytes += receive_buf(vi, rq, buf, len, NULL);
+			bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
 			received++;
 		}
 	}
@@ -1161,15 +1202,19 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	struct receive_queue *rq =
 		container_of(napi, struct receive_queue, napi);
 	unsigned int received;
+	bool xdp_xmit = false;
 
 	virtnet_poll_cleantx(rq);
 
-	received = virtnet_receive(rq, budget);
+	received = virtnet_receive(rq, budget, &xdp_xmit);
 
 	/* Out of packets? */
 	if (received < budget)
 		virtqueue_napi_complete(napi, rq->vq, received);
 
+	if (xdp_xmit)
+		xdp_do_flush_map();
+
 	return received;
 }
 
@@ -2069,6 +2114,8 @@ static const struct net_device_ops virtnet_netdev = {
 	.ndo_poll_controller	= virtnet_netpoll,
 #endif
 	.ndo_xdp		= virtnet_xdp,
+	.ndo_xdp_xmit		= virtnet_xdp_xmit,
+	.ndo_xdp_flush		= virtnet_xdp_flush,
 	.ndo_features_check	= passthru_features_check,
 };
 
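With both hooks registered, virtio-net becomes a valid target for frames redirected from other drivers. A redirect source can only target devices that expose both operations, since queued frames are not visible to the device until the flush kicks the virtqueue; a guard along these lines (illustrative only, not the in-tree devmap code) captures that contract:

    /* Illustrative only: a device qualifies as a redirect target when
     * it implements both the per-frame xmit and the batch flush hook. */
    static bool example_can_redirect_to(const struct net_device *dev)
    {
    	return dev->netdev_ops->ndo_xdp_xmit &&
    	       dev->netdev_ops->ndo_xdp_flush;
    }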