@@ -586,7 +586,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
-				     unsigned int *xdp_xmit)
+				     unsigned int *xdp_xmit,
+				     unsigned int *rbytes)
 {
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
@@ -601,6 +602,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
	int err;

	len -= vi->hdr_len;
+	*rbytes += len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -705,11 +707,13 @@ static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
-				   unsigned int len)
+				   unsigned int len,
+				   unsigned int *rbytes)
 {
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

+	*rbytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;
@@ -727,7 +731,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
					 void *buf,
					 void *ctx,
					 unsigned int len,
-					 unsigned int *xdp_xmit)
+					 unsigned int *xdp_xmit,
+					 unsigned int *rbytes)
 {
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -740,6 +745,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
	int err;

	head_skb = NULL;
+	*rbytes += len - vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -877,6 +883,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
			goto err_buf;
		}

+		*rbytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
@@ -932,6 +939,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
			dev->stats.rx_length_errors++;
			break;
		}
+		*rbytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
@@ -942,14 +950,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
	return NULL;
 }

-static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-		       void *buf, unsigned int len, void **ctx,
-		       unsigned int *xdp_xmit)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+			void *buf, unsigned int len, void **ctx,
+			unsigned int *xdp_xmit, unsigned int *rbytes)
 {
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
-	int ret;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
@@ -961,23 +968,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
		} else {
			put_page(virt_to_head_page(buf));
		}
-		return 0;
+		return;
	}

	if (vi->mergeable_rx_bufs)
-		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
+		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+					rbytes);
	else if (vi->big_packets)
-		skb = receive_big(dev, vi, rq, buf, len);
+		skb = receive_big(dev, vi, rq, buf, len, rbytes);
	else
-		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
+		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);

	if (unlikely(!skb))
-		return 0;
+		return;

	hdr = skb_vnet_hdr(skb);

-	ret = skb->len;
-
	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -994,12 +1000,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
-	return ret;
+	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
-	return 0;
 }

 /* Unlike mergeable buffers, all buffers are allocated to the
@@ -1249,13 +1254,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,

		while (received < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
-			bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
+			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
			received++;
		}
	} else {
		while (received < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-			bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
+			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes);
			received++;
		}
	}