@@ -660,23 +660,6 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 	}
 }
 
-static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
-				 struct tsnep_rx_entry *entry)
-{
-	struct page *page;
-
-	page = page_pool_dev_alloc_pages(rx->page_pool);
-	if (unlikely(!page))
-		return -ENOMEM;
-
-	entry->page = page;
-	entry->len = TSNEP_MAX_RX_BUF_SIZE;
-	entry->dma = page_pool_get_dma_addr(entry->page);
-	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
-
-	return 0;
-}
-
 static int tsnep_rx_ring_init(struct tsnep_rx *rx)
 {
 	struct device *dmadev = rx->adapter->dmadev;
@@ -723,10 +706,6 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
 		entry = &rx->entry[i];
 		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
 		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
-
-		retval = tsnep_rx_alloc_buffer(rx, entry);
-		if (retval)
-			goto failed;
 	}
 
 	return 0;
@@ -736,6 +715,45 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
 	return retval;
 }
 
+static int tsnep_rx_desc_available(struct tsnep_rx *rx)
+{
+	if (rx->read <= rx->write)
+		return TSNEP_RING_SIZE - rx->write + rx->read - 1;
+	else
+		return rx->read - rx->write - 1;
+}
+
+static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
+			      struct page *page)
+{
+	entry->page = page;
+	entry->len = TSNEP_MAX_RX_BUF_SIZE;
+	entry->dma = page_pool_get_dma_addr(entry->page);
+	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
+}
+
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
+{
+	struct tsnep_rx_entry *entry = &rx->entry[index];
+	struct page *page;
+
+	page = page_pool_dev_alloc_pages(rx->page_pool);
+	if (unlikely(!page))
+		return -ENOMEM;
+	tsnep_rx_set_page(rx, entry, page);
+
+	return 0;
+}
+
+static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
+{
+	struct tsnep_rx_entry *entry = &rx->entry[index];
+	struct tsnep_rx_entry *read = &rx->entry[rx->read];
+
+	tsnep_rx_set_page(rx, entry, read->page);
+	read->page = NULL;
+}
+
 static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
 {
 	struct tsnep_rx_entry *entry = &rx->entry[index];
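The tsnep_rx_desc_available() helper added above is standard ring-buffer bookkeeping: one slot is always left unused, so read == write can only mean "empty", never "full". A minimal standalone sketch of that invariant, using an illustrative ring size rather than the driver's TSNEP_RING_SIZE:

#include <assert.h>

#define RING_SIZE 256	/* illustrative; the driver uses TSNEP_RING_SIZE */

/* Free slots between write (producer) and read (consumer), keeping
 * one slot unused to distinguish a full ring from an empty one.
 */
static int ring_free(int read, int write)
{
	if (read <= write)
		return RING_SIZE - write + read - 1;
	else
		return read - write - 1;
}

int main(void)
{
	assert(ring_free(0, 0) == RING_SIZE - 1);	/* empty ring */
	assert(ring_free(10, 9) == 0);			/* full: write one behind read */
	assert(ring_free(0, RING_SIZE - 1) == 0);	/* full at the wrap point */
	return 0;
}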
@@ -763,6 +781,48 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
 	entry->desc->properties = __cpu_to_le32(entry->properties);
 }
 
+static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
+{
+	int index;
+	bool alloc_failed = false;
+	bool enable = false;
+	int i;
+	int retval;
+
+	for (i = 0; i < count && !alloc_failed; i++) {
+		index = (rx->write + i) % TSNEP_RING_SIZE;
+
+		retval = tsnep_rx_alloc_buffer(rx, index);
+		if (unlikely(retval)) {
+			rx->alloc_failed++;
+			alloc_failed = true;
+
+			/* reuse only if no other allocation was successful */
+			if (i == 0 && reuse)
+				tsnep_rx_reuse_buffer(rx, index);
+			else
+				break;
+		}
+
+		tsnep_rx_activate(rx, index);
+
+		enable = true;
+	}
+
+	if (enable) {
+		rx->write = (rx->write + i) % TSNEP_RING_SIZE;
+
+		/* descriptor properties shall be valid before hardware is
+		 * notified
+		 */
+		dma_wmb();
+
+		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+	}
+
+	return i;
+}
+
 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
 				       int length)
 {
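tsnep_rx_refill() returns how many descriptors it actually handed back to the hardware, so callers can keep an exact free count even on partial allocation failure; the reuse path fires only for the very first slot (i == 0), which guarantees at least one buffer stays queued when the page pool is exhausted. A condensed model of the caller's batching decision is sketched below; the threshold values are placeholders, not the driver's real TSNEP_RING_RX_REFILL/TSNEP_RING_RX_REUSE definitions:

#include <stdbool.h>

#define RING_SIZE	256			/* placeholder for TSNEP_RING_SIZE */
#define RING_RX_REFILL	16			/* placeholder batch threshold */
#define RING_RX_REUSE	(RING_SIZE - RING_SIZE / 4)	/* placeholder */

/* Stub standing in for tsnep_rx_refill(): pretend every requested
 * descriptor could be allocated and activated.
 */
static int refill(int count, bool reuse)
{
	(void)reuse;
	return count;
}

/* Mirrors the poll-path decision: refill only in batches, and allow
 * reusing the oldest pending buffer only when the ring is nearly
 * drained, so it can never run completely dry.
 */
static int maybe_refill(int desc_available)
{
	if (desc_available >= RING_RX_REFILL) {
		bool reuse = desc_available >= RING_RX_REUSE;

		desc_available -= refill(desc_available, reuse);
	}
	return desc_available;
}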
@@ -798,23 +858,42 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 			 int budget)
 {
 	struct device *dmadev = rx->adapter->dmadev;
+	int desc_available;
 	int done = 0;
 	enum dma_data_direction dma_dir;
 	struct tsnep_rx_entry *entry;
-	struct page *page;
 	struct sk_buff *skb;
 	int length;
-	bool enable = false;
-	int retval;
 
+	desc_available = tsnep_rx_desc_available(rx);
 	dma_dir = page_pool_get_dma_dir(rx->page_pool);
 
-	while (likely(done < budget)) {
+	while (likely(done < budget) && (rx->read != rx->write)) {
 		entry = &rx->entry[rx->read];
 		if ((__le32_to_cpu(entry->desc_wb->properties) &
 		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
 		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
 			break;
+		done++;
+
+		if (desc_available >= TSNEP_RING_RX_REFILL) {
+			bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+			desc_available -= tsnep_rx_refill(rx, desc_available,
+							  reuse);
+			if (!entry->page) {
+				/* buffer has been reused for refill to prevent
+				 * empty RX ring, thus buffer cannot be used for
+				 * RX processing
+				 */
+				rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+				desc_available++;
+
+				rx->dropped++;
+
+				continue;
+			}
+		}
 
 		/* descriptor properties shall be read first, because valid data
 		 * is signaled there
@@ -826,49 +905,30 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 			 TSNEP_DESC_LENGTH_MASK;
 		dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
 					      length, dma_dir);
-		page = entry->page;
 
-		/* forward skb only if allocation is successful, otherwise
-		 * page is reused and frame dropped
-		 */
-		retval = tsnep_rx_alloc_buffer(rx, entry);
-		if (!retval) {
-			skb = tsnep_build_skb(rx, page, length);
-			if (skb) {
-				page_pool_release_page(rx->page_pool, page);
-
-				rx->packets++;
-				rx->bytes += length -
-					     TSNEP_RX_INLINE_METADATA_SIZE;
-				if (skb->pkt_type == PACKET_MULTICAST)
-					rx->multicast++;
-
-				napi_gro_receive(napi, skb);
-			} else {
-				page_pool_recycle_direct(rx->page_pool, page);
+		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+		desc_available++;
 
-				rx->dropped++;
-			}
-			done++;
-		} else {
-			rx->dropped++;
-		}
+		skb = tsnep_build_skb(rx, entry->page, length);
+		if (skb) {
+			page_pool_release_page(rx->page_pool, entry->page);
 
-		tsnep_rx_activate(rx, rx->read);
+			rx->packets++;
+			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+			if (skb->pkt_type == PACKET_MULTICAST)
+				rx->multicast++;
 
-		enable = true;
+			napi_gro_receive(napi, skb);
+		} else {
+			page_pool_recycle_direct(rx->page_pool, entry->page);
 
-		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+			rx->dropped++;
+		}
+		entry->page = NULL;
 	}
 
-	if (enable) {
-		/* descriptor properties shall be valid before hardware is
-		 * notified
-		 */
-		dma_wmb();
-
-		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
-	}
+	if (desc_available)
+		tsnep_rx_refill(rx, desc_available, false);
 
 	return done;
 }
@@ -877,11 +937,13 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
 {
 	struct tsnep_rx_entry *entry;
 
-	entry = &rx->entry[rx->read];
-	if ((__le32_to_cpu(entry->desc_wb->properties) &
-	     TSNEP_DESC_OWNER_COUNTER_MASK) ==
-	    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
-		return true;
+	if (rx->read != rx->write) {
+		entry = &rx->entry[rx->read];
+		if ((__le32_to_cpu(entry->desc_wb->properties) &
+		     TSNEP_DESC_OWNER_COUNTER_MASK) ==
+		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+			return true;
+	}
 
 	return false;
 }
@@ -890,7 +952,6 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
 			 int queue_index, struct tsnep_rx *rx)
 {
 	dma_addr_t dma;
-	int i;
 	int retval;
 
 	memset(rx, 0, sizeof(*rx));
@@ -908,13 +969,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
 	rx->owner_counter = 1;
 	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
 
-	for (i = 0; i < TSNEP_RING_SIZE; i++)
-		tsnep_rx_activate(rx, i);
-
-	/* descriptor properties shall be valid before hardware is notified */
-	dma_wmb();
-
-	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+	tsnep_rx_refill(rx, tsnep_rx_desc_available(rx), false);
 
 	return 0;
 }