@@ -877,48 +877,15 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	struct rx_bd *rxbd =
 		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
-	struct pci_dev *pdev = bp->pdev;
 	struct page *page;
 	dma_addr_t mapping;
 	u16 sw_prod = rxr->rx_sw_agg_prod;
 	unsigned int offset = 0;
 
-	if (BNXT_RX_PAGE_MODE(bp)) {
-		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
-		if (!page)
-			return -ENOMEM;
-
-	} else {
-		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-			page = rxr->rx_page;
-			if (!page) {
-				page = alloc_page(gfp);
-				if (!page)
-					return -ENOMEM;
-				rxr->rx_page = page;
-				rxr->rx_page_offset = 0;
-			}
-			offset = rxr->rx_page_offset;
-			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
-			if (rxr->rx_page_offset == PAGE_SIZE)
-				rxr->rx_page = NULL;
-			else
-				get_page(page);
-		} else {
-			page = alloc_page(gfp);
-			if (!page)
-				return -ENOMEM;
-		}
+	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
-		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-					     DMA_ATTR_WEAK_ORDERING);
-		if (dma_mapping_error(&pdev->dev, mapping)) {
-			__free_page(page);
-			return -EIO;
-		}
-	}
+	if (!page)
+		return -ENOMEM;
 
 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
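With the aggregation ring backed by the page pool, both modes now allocate through __bnxt_alloc_rx_page(), so the open-coded alloc_page()/dma_map_page_attrs() fallback and the rxr->rx_page fragment cache go away. As a rough illustration of the shape of such an allocator (a hypothetical helper, not the driver's __bnxt_alloc_rx_page()), a page_pool can hand out either whole pages or BNXT_RX_PAGE_SIZE fragments that are already DMA-mapped:

	/*
	 * Hypothetical sketch, not the driver's code.  Assumes the pool was
	 * created with DMA mapping enabled (PP_FLAG_DMA_MAP), so the page
	 * comes back already mapped and the caller no longer needs the
	 * pci_dev or dma_map_page_attrs() at all.  Requires the page_pool
	 * helpers (e.g. <net/page_pool/helpers.h> on recent kernels).
	 */
	static struct page *demo_alloc_agg_page(struct page_pool *pool,
						dma_addr_t *mapping,
						unsigned int *offset)
	{
		struct page *page;

		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
			/* carve a BNXT_RX_PAGE_SIZE fragment from a larger page */
			page = page_pool_dev_alloc_frag(pool, offset,
							BNXT_RX_PAGE_SIZE);
		} else {
			page = page_pool_dev_alloc_pages(pool);
			*offset = 0;
		}
		if (!page)
			return NULL;

		/* the pool recorded the DMA address when it mapped the page */
		*mapping = page_pool_get_dma_addr(page) + *offset;
		return page;
	}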
@@ -1204,6 +1171,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
 					     agg_bufs, tpa, NULL);
 	if (!total_frag_len) {
+		skb_mark_for_recycle(skb);
 		dev_kfree_skb(skb);
 		return NULL;
 	}
@@ -1794,6 +1762,7 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
 		return;
 	}
 	skb_record_rx_queue(skb, bnapi->index);
+	skb_mark_for_recycle(skb);
 	napi_gro_receive(&bnapi->napi, skb);
 }
 
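skb_mark_for_recycle() sets the skb's pp_recycle bit, so that when the skb is eventually consumed, whether by the stack after napi_gro_receive() here or by dev_kfree_skb() on the error path in the previous hunk, frags that came from a page pool are returned to that pool instead of being released with put_page(). A minimal sketch of the pattern, with hypothetical names:

	/*
	 * Sketch of the delivery pattern (hypothetical helper): once every
	 * fragment attached to the skb comes from a page_pool, mark the skb
	 * so those pages are recycled back into the pool when it is freed.
	 */
	static void demo_deliver(struct napi_struct *napi, struct sk_buff *skb,
				 u16 rx_queue)
	{
		skb_record_rx_queue(skb, rx_queue);
		skb_mark_for_recycle(skb);
		napi_gro_receive(napi, skb);
	}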
@@ -3002,30 +2971,16 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		if (BNXT_RX_PAGE_MODE(bp)) {
-			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-					     DMA_ATTR_WEAK_ORDERING);
-			rx_agg_buf->page = NULL;
-			__clear_bit(i, rxr->rx_agg_bmap);
-
-			page_pool_recycle_direct(rxr->page_pool, page);
-		} else {
-			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-					     DMA_ATTR_WEAK_ORDERING);
-			rx_agg_buf->page = NULL;
-			__clear_bit(i, rxr->rx_agg_bmap);
+		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
+				     DMA_ATTR_WEAK_ORDERING);
+		rx_agg_buf->page = NULL;
+		__clear_bit(i, rxr->rx_agg_bmap);
 
-			__free_page(page);
-		}
+		page_pool_recycle_direct(rxr->page_pool, page);
 	}
 
 skip_rx_agg_free:
-	if (rxr->rx_page) {
-		__free_page(rxr->rx_page);
-		rxr->rx_page = NULL;
-	}
 	map = rxr->rx_tpa_idx_map;
 	if (map)
 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
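Because every aggregation page is now owned by the page pool, the teardown path collapses to one unmap-and-recycle sequence and the rx_page partial-page cache no longer needs freeing. As the dma_unmap_page_attrs() call above shows, the driver still unmaps the pages itself before handing them back. A sketch of the per-buffer cleanup, using the same names as the hunk:

	/*
	 * Sketch only.  page_pool_recycle_direct() puts the page straight
	 * into the pool's lockless cache; page_pool_put_full_page(pool,
	 * page, false) is the more general variant when the caller may run
	 * outside the pool's NAPI context.
	 */
	static void demo_free_agg_page(struct bnxt *bp,
				       struct bnxt_rx_ring_info *rxr,
				       struct bnxt_sw_rx_agg_bd *rx_agg_buf,
				       int i)
	{
		struct page *page = rx_agg_buf->page;

		if (!page)
			return;

		dma_unmap_page_attrs(&bp->pdev->dev, rx_agg_buf->mapping,
				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);
		rx_agg_buf->page = NULL;
		__clear_bit(i, rxr->rx_agg_bmap);

		page_pool_recycle_direct(rxr->page_pool, page);
	}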
@@ -3244,7 +3199,9 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 {
 	struct page_pool_params pp = { 0 };
 
-	pp.pool_size = bp->rx_ring_size;
+	pp.pool_size = bp->rx_agg_ring_size;
+	if (BNXT_RX_PAGE_MODE(bp))
+		pp.pool_size += bp->rx_ring_size;
 	pp.nid = dev_to_node(&bp->pdev->dev);
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
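The pool is now sized for the aggregation ring, and additionally for the normal RX ring in page mode, where those buffers are also expected to come from the same pool. The rest of bnxt_alloc_rx_page_pool() is outside this hunk; a minimal sketch of how a setup like this typically completes (illustrative error handling, not necessarily the driver's exact code):

	/*
	 * Sketch: create the pool from the params filled in above and
	 * stash it in the ring; page_pool_create() returns an ERR_PTR
	 * on failure.
	 */
	static int demo_create_pool(struct bnxt_rx_ring_info *rxr,
				    struct page_pool_params *pp)
	{
		struct page_pool *pool;

		pool = page_pool_create(pp);
		if (IS_ERR(pool))
			return PTR_ERR(pool);

		rxr->page_pool = pool;
		return 0;
	}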