@@ -524,31 +524,6 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 	return res;
 }
 
-/*
- * If we've lost frames since the last time we queued one to the
- * sk_receive_queue, we need to record it here.
- * This must be called under the protection of the socket lock
- * to prevent racing with other softirqs and user space
- */
-static inline void record_packet_gap(struct sk_buff *skb,
-				     struct packet_sock *po)
-{
-	/*
-	 * We overload the mark field here, since we're about
-	 * to enqueue to a receive queue and nobody else will
-	 * use this field at this point
-	 */
-	skb->mark = po->stats.tp_gap;
-	po->stats.tp_gap = 0;
-	return;
-
-}
-
-static inline __u32 check_packet_gap(struct sk_buff *skb)
-{
-	return skb->mark;
-}
-
 /*
    This function makes lazy skb cloning in hope that most of packets
    are discarded by BPF.
@@ -652,7 +627,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_packets++;
-	record_packet_gap(skb, po);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
 	sk->sk_data_ready(sk, skb->len);
@@ -661,7 +635,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -839,7 +812,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 
 ring_is_full:
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 	sk->sk_data_ready(sk, 0);
@@ -1449,7 +1421,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb;
 	int copied, err;
 	struct sockaddr_ll *sll;
-	__u32 gap;
 
 	err = -EINVAL;
 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1528,10 +1499,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
 
-	gap = check_packet_gap(skb);
-	if (gap)
-		put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);
-
 	/*
 	 * Free or return the buffer as appropriate. Again this
 	 * hides all the races and re-entrancy issues from us.
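For context, the feature being removed here was userspace-visible: packet_rcv() accumulated drops in po->stats.tp_gap, record_packet_gap() stashed that count in skb->mark at enqueue time, and packet_recvmsg() surfaced it as a PACKET_GAPDATA control message carrying a __u32. The sketch below shows how a consumer on a kernel carrying the reverted patch could have read that cmsg; it is illustrative only. gap_reader.c is a hypothetical name, the PACKET_GAPDATA value of 15 is an assumption (the constant does not exist in current mainline headers), and note that per the hunk above the kernel only emitted the cmsg when the gap was non-zero, so frames with no preceding loss carry no PACKET_GAPDATA at all.

/* gap_reader.c: hypothetical consumer of the now-removed PACKET_GAPDATA cmsg.
 * Needs CAP_NET_RAW; only does anything on a kernel with the reverted patch. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>		/* htons() */
#include <linux/types.h>	/* __u32 */
#include <linux/if_ether.h>	/* ETH_P_ALL */
#include <linux/if_packet.h>	/* SOL_PACKET */

#ifndef PACKET_GAPDATA
#define PACKET_GAPDATA 15	/* assumed value from the reverted patch; gone from mainline */
#endif

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	for (;;) {
		char frame[2048];
		char cbuf[CMSG_SPACE(sizeof(__u32))];
		struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = cbuf,
			.msg_controllen = sizeof(cbuf),
		};

		if (recvmsg(fd, &msg, 0) < 0) {
			perror("recvmsg");
			return 1;
		}

		/* Walk the ancillary data; a PACKET_GAPDATA cmsg meant "this many
		 * frames were dropped since the previous one you received". */
		for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_PACKET &&
			    cmsg->cmsg_type == PACKET_GAPDATA) {
				__u32 gap;
				memcpy(&gap, CMSG_DATA(cmsg), sizeof(gap));
				printf("gap: %u frame(s) lost before this one\n", gap);
			}
		}
	}
}

After this revert the match arm above is dead code on mainline: no SOL_PACKET/PACKET_GAPDATA cmsg is ever generated, and drop counts are again visible only in aggregate, via getsockopt(PACKET_STATISTICS) and its tp_drops counter.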