@@ -582,7 +582,8 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
+				      DMA_ATTR_WEAK_ORDERING);
 	if (dma_mapping_error(dev, *mapping)) {
 		__free_page(page);
 		return NULL;
@@ -601,8 +602,9 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
 	if (!data)
 		return NULL;
 
-	*mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
-				  bp->rx_buf_use_size, bp->rx_dir);
+	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
+					bp->rx_buf_use_size, bp->rx_dir,
+					DMA_ATTR_WEAK_ORDERING);
 
 	if (dma_mapping_error(&pdev->dev, *mapping)) {
 		kfree(data);
@@ -705,8 +707,9 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 		return -ENOMEM;
 	}
 
-	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
+	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+				     DMA_ATTR_WEAK_ORDERING);
 	if (dma_mapping_error(&pdev->dev, mapping)) {
 		__free_page(page);
 		return -EIO;
@@ -799,7 +802,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+			     DMA_ATTR_WEAK_ORDERING);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(data_ptr, len);
@@ -841,8 +845,8 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 	}
 
 	skb = build_skb(data, 0);
-	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
-			 bp->rx_dir);
+	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 	if (!skb) {
 		kfree(data);
 		return NULL;
@@ -909,8 +913,9 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
 			return NULL;
 		}
 
-		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
+		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+				     PCI_DMA_FROMDEVICE,
+				     DMA_ATTR_WEAK_ORDERING);
 
 		skb->data_len += frag_len;
 		skb->len += frag_len;
@@ -1329,8 +1334,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		tpa_info->mapping = new_mapping;
 
 		skb = build_skb(data, 0);
-		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
-				 bp->rx_dir);
+		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
+				       bp->rx_buf_use_size, bp->rx_dir,
+				       DMA_ATTR_WEAK_ORDERING);
 
 		if (!skb) {
 			kfree(data);
@@ -1971,9 +1977,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 			if (!data)
 				continue;
 
-			dma_unmap_single(&pdev->dev, tpa_info->mapping,
-					 bp->rx_buf_use_size,
-					 bp->rx_dir);
+			dma_unmap_single_attrs(&pdev->dev,
+					       tpa_info->mapping,
+					       bp->rx_buf_use_size,
+					       bp->rx_dir,
+					       DMA_ATTR_WEAK_ORDERING);
 
 			tpa_info->data = NULL;
 
@@ -1993,13 +2001,15 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
 			if (BNXT_RX_PAGE_MODE(bp)) {
 				mapping -= bp->rx_dma_offset;
-				dma_unmap_page(&pdev->dev, mapping,
-					       PAGE_SIZE, bp->rx_dir);
+				dma_unmap_page_attrs(&pdev->dev, mapping,
+						     PAGE_SIZE, bp->rx_dir,
+						     DMA_ATTR_WEAK_ORDERING);
 				__free_page(data);
 			} else {
-				dma_unmap_single(&pdev->dev, mapping,
-						 bp->rx_buf_use_size,
-						 bp->rx_dir);
+				dma_unmap_single_attrs(&pdev->dev, mapping,
+						       bp->rx_buf_use_size,
+						       bp->rx_dir,
+						       DMA_ATTR_WEAK_ORDERING);
 				kfree(data);
 			}
 		}
@@ -2012,8 +2022,10 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 			if (!page)
 				continue;
 
-			dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
-				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+					     BNXT_RX_PAGE_SIZE,
+					     PCI_DMA_FROMDEVICE,
+					     DMA_ATTR_WEAK_ORDERING);
 
 			rx_agg_buf->page = NULL;
 			__clear_bit(j, rxr->rx_agg_bmap);