@@ -739,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
-	*mapping += bp->rx_dma_offset;
 	return page;
 }
 
@@ -781,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 		if (!page)
 			return -ENOMEM;
 
+		mapping += bp->rx_dma_offset;
 		rx_buf->data = page;
 		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
 	} else {
@@ -841,33 +841,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	u16 sw_prod = rxr->rx_sw_agg_prod;
 	unsigned int offset = 0;
 
-	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
-		page = rxr->rx_page;
-		if (!page) {
+	if (BNXT_RX_PAGE_MODE(bp)) {
+		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+		if (!page)
+			return -ENOMEM;
+
+	} else {
+		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+			page = rxr->rx_page;
+			if (!page) {
+				page = alloc_page(gfp);
+				if (!page)
+					return -ENOMEM;
+				rxr->rx_page = page;
+				rxr->rx_page_offset = 0;
+			}
+			offset = rxr->rx_page_offset;
+			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+			if (rxr->rx_page_offset == PAGE_SIZE)
+				rxr->rx_page = NULL;
+			else
+				get_page(page);
+		} else {
 			page = alloc_page(gfp);
 			if (!page)
 				return -ENOMEM;
-			rxr->rx_page = page;
-			rxr->rx_page_offset = 0;
 		}
-		offset = rxr->rx_page_offset;
-		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
-		if (rxr->rx_page_offset == PAGE_SIZE)
-			rxr->rx_page = NULL;
-		else
-			get_page(page);
-	} else {
-		page = alloc_page(gfp);
-		if (!page)
-			return -ENOMEM;
-	}
 
-	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
-				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-				     DMA_ATTR_WEAK_ORDERING);
-	if (dma_mapping_error(&pdev->dev, mapping)) {
-		__free_page(page);
-		return -EIO;
+		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+					     DMA_ATTR_WEAK_ORDERING);
+		if (dma_mapping_error(&pdev->dev, mapping)) {
+			__free_page(page);
+			return -EIO;
+		}
 	}
 
 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -1105,7 +1113,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 		}
 
 		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-				     DMA_FROM_DEVICE,
+				     bp->rx_dir,
 				     DMA_ATTR_WEAK_ORDERING);
 
 		total_frag_len += frag_len;
@@ -2936,14 +2944,23 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-				     DMA_ATTR_WEAK_ORDERING);
+		if (BNXT_RX_PAGE_MODE(bp)) {
+			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
+					     DMA_ATTR_WEAK_ORDERING);
+			rx_agg_buf->page = NULL;
+			__clear_bit(i, rxr->rx_agg_bmap);
 
-		rx_agg_buf->page = NULL;
-		__clear_bit(i, rxr->rx_agg_bmap);
+			page_pool_recycle_direct(rxr->page_pool, page);
+		} else {
+			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+					     DMA_ATTR_WEAK_ORDERING);
+			rx_agg_buf->page = NULL;
+			__clear_bit(i, rxr->rx_agg_bmap);
 
-		__free_page(page);
+			__free_page(page);
+		}
 	}
 
 skip_rx_agg_free: