@@ -761,7 +761,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                          unsigned int *offset,
                                          gfp_t gfp)
 {
-        struct device *dev = &bp->pdev->dev;
         struct page *page;
 
         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
@@ -774,12 +773,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
         if (!page)
                 return NULL;
 
-        *mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
-                                      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
-        if (dma_mapping_error(dev, *mapping)) {
-                page_pool_recycle_direct(rxr->page_pool, page);
-                return NULL;
-        }
+        *mapping = page_pool_get_dma_addr(page) + *offset;
         return page;
 }
 
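Note on the hunk above: with PP_FLAG_DMA_MAP set on the pool (see the bnxt_alloc_rx_page_pool hunk at the end of this diff), the page pool maps each page when it is allocated, so __bnxt_alloc_rx_page() no longer needs its own dma_map_page_attrs()/dma_mapping_error() handling and simply reads the stored mapping back, adding the buffer offset. A minimal sketch of that allocation pattern, assuming a pool created with PP_FLAG_DMA_MAP and the <net/page_pool.h> header of the kernel this diff targets (the helper name rx_alloc_mapped_page is hypothetical, not driver code):

#include <net/page_pool.h>

/* Hypothetical helper: allocate a page from a pool created with
 * PP_FLAG_DMA_MAP and return its DMA address plus the caller's offset.
 * The mapping was established by the pool at allocation time, so no
 * dma_map_page_attrs()/dma_mapping_error() is needed here.
 */
static struct page *rx_alloc_mapped_page(struct page_pool *pool,
                                         unsigned int offset,
                                         dma_addr_t *mapping)
{
        struct page *page = page_pool_dev_alloc_pages(pool);

        if (!page)
                return NULL;
        *mapping = page_pool_get_dma_addr(page) + offset;
        return page;
}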
@@ -998,8 +992,8 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
                 return NULL;
         }
         dma_addr -= bp->rx_dma_offset;
-        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-                             bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+                                bp->rx_dir);
         skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
         if (!skb) {
                 page_pool_recycle_direct(rxr->page_pool, page);
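The same conversion drives this hunk and the two that follow: because pool-managed pages stay DMA-mapped for their whole lifetime, the per-packet dma_unmap_page_attrs() becomes a dma_sync_single_for_cpu() before the CPU reads the buffer, and the matching device-direction sync happens inside the pool on recycle because PP_FLAG_DMA_SYNC_DEV is set. A condensed sketch of the resulting RX pattern (a hypothetical standalone helper, not the driver code; names mirror the hunk above and error handling is trimmed):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>

/* Sketch only: sync the still-mapped buffer for the CPU, build the skb,
 * and hand the page straight back to the pool if that fails. The pool
 * re-syncs the page for the device before reusing it.
 */
static struct sk_buff *rx_page_to_skb(struct device *dev,
                                      struct page_pool *pool,
                                      struct page *page, dma_addr_t dma_addr,
                                      void *data_ptr, unsigned int buf_size,
                                      enum dma_data_direction dir)
{
        struct sk_buff *skb;

        dma_sync_single_for_cpu(dev, dma_addr, buf_size, dir);
        skb = build_skb(data_ptr, buf_size);
        if (!skb) {
                page_pool_recycle_direct(pool, page);
                return NULL;
        }
        return skb;
}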
@@ -1032,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                 return NULL;
         }
         dma_addr -= bp->rx_dma_offset;
-        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-                             bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+        dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+                                bp->rx_dir);
 
         if (unlikely(!payload))
                 payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1149,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
                         return 0;
                 }
 
-                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-                                     bp->rx_dir,
-                                     DMA_ATTR_WEAK_ORDERING);
+                dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+                                        bp->rx_dir);
 
                 total_frag_len += frag_len;
                 prod = NEXT_RX_AGG(prod);
@@ -2947,10 +2940,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 
                 rx_buf->data = NULL;
                 if (BNXT_RX_PAGE_MODE(bp)) {
-                        mapping -= bp->rx_dma_offset;
-                        dma_unmap_page_attrs(&pdev->dev, mapping,
-                                             BNXT_RX_PAGE_SIZE, bp->rx_dir,
-                                             DMA_ATTR_WEAK_ORDERING);
                         page_pool_recycle_direct(rxr->page_pool, data);
                 } else {
                         dma_unmap_single_attrs(&pdev->dev, mapping,
@@ -2971,9 +2960,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
                 if (!page)
                         continue;
 
-                dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-                                     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-                                     DMA_ATTR_WEAK_ORDERING);
                 rx_agg_buf->page = NULL;
                 __clear_bit(i, rxr->rx_agg_bmap);
 
@@ -3205,7 +3191,9 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
         pp.nid = dev_to_node(&bp->pdev->dev);
         pp.napi = &rxr->bnapi->napi;
         pp.dev = &bp->pdev->dev;
-        pp.dma_dir = DMA_BIDIRECTIONAL;
+        pp.dma_dir = bp->rx_dir;
+        pp.max_len = PAGE_SIZE;
+        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
                 pp.flags |= PP_FLAG_PAGE_FRAG;
 
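The page_pool_params changes above are what make the earlier hunks possible: dma_dir now matches the direction the driver actually uses for RX buffers, max_len tells the pool how much of each page to sync for the device, and PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV delegate mapping and device-side syncing to the pool. A minimal sketch of creating such a pool outside this driver, under the same assumptions (the helper name create_rx_page_pool is hypothetical; page_pool_create() already returns an ERR_PTR() on failure):

#include <linux/device.h>
#include <net/page_pool.h>

/* Hypothetical example, not part of the patch: a pool that owns the RX
 * DMA mappings. Pages are mapped once when they enter the pool and only
 * unmapped when the pool releases them; recycled pages are re-synced for
 * the device over the first max_len bytes.
 */
static struct page_pool *create_rx_page_pool(struct device *dev,
                                             struct napi_struct *napi,
                                             enum dma_data_direction dir,
                                             unsigned int pool_size)
{
        struct page_pool_params pp = { 0 };

        pp.pool_size = pool_size;
        pp.nid = dev_to_node(dev);
        pp.napi = napi;
        pp.dev = dev;
        pp.dma_dir = dir;                              /* direction used for RX buffers */
        pp.max_len = PAGE_SIZE;                        /* region synced for the device */
        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;

        return page_pool_create(&pp);                  /* ERR_PTR() on failure */
}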